// -*- C++ -*-

// NnlmOnlineLearner.cc
//
// Copyright (C) 2006 Pierre-Antoine Manzagol
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pierre-Antoine Manzagol

#include "NnlmOnlineLearner.h"

#include <plearn/math/PRandom.h>
#include <plearn/math/TMat_maths.h>
#include <plearn_learners/online/OnlineLearningModule.h>

#include <plearn/vmat/VMat.h>
// necessary?
#include <plearn_learners_experimental/onlineNNLM/NnlmWordRepresentationLayer.h>
#include <plearn_learners/online/GradNNetLayerModule.h>
#include <plearn_learners/online/TanhModule.h>
#include <plearn_learners/online/NLLErrModule.h>
#include <plearn_learners_experimental/onlineNNLM/NnlmOutputLayer.h>

#include <plearn_learners/distributions/NGramDistribution.h>
#include <plearn_learners/distributions/SymbolNode.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    NnlmOnlineLearner,
    "Trains a Neural Network Language Model.",
    "The learner maps a context of word tags to a real-valued semantic layer\n"
    "(word representation layer or GradNNetLayer, then GradNNetLayer, then\n"
    "tanh) and models the next word on top of that layer with either a\n"
    "gaussian model or a softmax.");


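// A minimal usage sketch (illustrative; the option names are the build
// options declared below, but the driver code and variable names are
// hypothetical):
//
//     PP<NnlmOnlineLearner> learner = new NnlmOnlineLearner();
//     learner->setOption("str_output_model", "gaussian");
//     learner->setOption("ngram_candidates_size", "50");
//     learner->nstages = 5;
//     learner->setTrainingSet(train_vmat);  // a ProcessSymbolicSequenceVMatrix
//     // a gaussian model also needs ngram_train_set for its candidate words
//     learner->train();
//     learner->test(test_vmat, new VecStatsCollector(), 0, 0);
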
// class wordAndFreq
class wordAndFreq {
public:
    wordAndFreq(int wt, int f) : wordtag(wt), frequency(f) {}
    int wordtag;
    int frequency;
};
bool wordAndFreqGT(const wordAndFreq &a, const wordAndFreq &b)
{
    return a.frequency > b.frequency;
}

// NnlmOnlineLearner()
NnlmOnlineLearner::NnlmOnlineLearner()
    :   PLearner(),
        str_input_model( "wrl" ),
        str_output_model( "gaussian" ),
        word_representation_size( 30 ),
        semantic_layer_size( 100 ),
        wrl_slr( 0.001 ),
        wrl_dc( 0.0 ),
        wrl_wd_l1( 0.0 ),
        wrl_wd_l2( 0.0 ),
        sl_slr( 0.001 ),
        sl_dc( 0.0 ),
        sl_wd_l1( 0.0 ),
        sl_wd_l2( 0.0 ),
        str_gaussian_model_train_cost( "approx_discriminant" ),
        str_gaussian_model_learning( "non_discriminant" ),
        gaussian_model_sigma2_min( 0.000001 ),
        gaussian_model_dl_slr( 0.001 ),
        gaussian_model_dl_dc( 0.0 ),
        shared_candidates_size( 0 ),
        ngram_candidates_size( 50 ),
        self_candidates_size( 0 ),
        sm_slr( 0.001 ),
        sm_dc( 0.0 ),
        sm_wd_l1( 0.0 ),
        sm_wd_l2( 0.0 ),
        vocabulary_size( -1 ),
        context_size( -1 ),
        nmodules( -1 ),
        output_nmodules( -1 ),
        model_type( -1 ),
        gaussian_model_cost( -1 ),
        gaussian_model_learning( -1 )
{
    // ### You may (or not) want to call build_() to finish building the object
    // ### (doing so assumes the parent classes' build_() have been called too
    // ### in the parent classes' constructors, something that you must ensure)

    random_gen = new PRandom();
}

// declareOptions
void NnlmOnlineLearner::declareOptions(OptionList& ol)
{

    // *** Build Options ***

    // * Model type *
    declareOption(ol, "str_input_model",
                  &NnlmOnlineLearner::str_input_model,
                  OptionBase::buildoption,
                  "Specifies what is used as the input layer: 'wrl' (default - word representation layer) or 'gnnl' (GradNNetLayer).");
    declareOption(ol, "str_output_model",
                  &NnlmOnlineLearner::str_output_model,
                  OptionBase::buildoption,
                  "Specifies what is used on top of the semantic layer: 'softmax' or 'gaussian' (default).");

    // * Model size *
    declareOption(ol, "word_representation_size",
                  &NnlmOnlineLearner::word_representation_size,
                  OptionBase::buildoption,
                  "Size of the real distributed word representation.");

    declareOption(ol, "semantic_layer_size",
                  &NnlmOnlineLearner::semantic_layer_size,
                  OptionBase::buildoption,
                  "Size of the semantic layer.");

    // * Fixed part parameters *
    declareOption(ol, "wrl_slr",
                  &NnlmOnlineLearner::wrl_slr,
                  OptionBase::buildoption,
                  "Word representation layer start learning rate.");
    declareOption(ol, "wrl_dc",
                  &NnlmOnlineLearner::wrl_dc,
                  OptionBase::buildoption,
                  "Word representation layer decrease constant.");
    declareOption(ol, "wrl_wd_l1",
                  &NnlmOnlineLearner::wrl_wd_l1,
                  OptionBase::buildoption,
                  "Word representation layer L1 penalty factor.");
    declareOption(ol, "wrl_wd_l2",
                  &NnlmOnlineLearner::wrl_wd_l2,
                  OptionBase::buildoption,
                  "Word representation layer L2 penalty factor.");
    declareOption(ol, "sl_slr",
                  &NnlmOnlineLearner::sl_slr,
                  OptionBase::buildoption,
                  "Semantic layer start learning rate.");
    declareOption(ol, "sl_dc",
                  &NnlmOnlineLearner::sl_dc,
                  OptionBase::buildoption,
                  "Semantic layer decrease constant.");
    declareOption(ol, "sl_wd_l1",
                  &NnlmOnlineLearner::sl_wd_l1,
                  OptionBase::buildoption,
                  "Semantic layer L1 penalty factor.");
    declareOption(ol, "sl_wd_l2",
                  &NnlmOnlineLearner::sl_wd_l2,
                  OptionBase::buildoption,
                  "Semantic layer L2 penalty factor.");


    // * Gaussian model specific *

    // - model behavior
    // TODO how about combining the two costs: maybe jumpstart with one
    declareOption(ol, "str_gaussian_model_train_cost",
                  &NnlmOnlineLearner::str_gaussian_model_train_cost,
                  OptionBase::buildoption,
                  "In case of a gaussian output module, specifies the cost used for training (i a word, r a semantic layer representation): 'discriminant' (computes p(i|r) exactly, with full computation of the normalizer), 'approx_discriminant' (default - uses a set of candidate words for normalization) or 'non_discriminant' (uses p(r|i)).");

    declareOption(ol, "str_gaussian_model_learning",
                  &NnlmOnlineLearner::str_gaussian_model_learning,
                  OptionBase::buildoption,
                  "In case of a gaussian output module, specifies the learning technique: 'discriminant' or 'non_discriminant' (default - evaluates empirical mu and sigma).");

    declareOption(ol, "gaussian_model_sigma2_min",
                  &NnlmOnlineLearner::gaussian_model_sigma2_min,
                  OptionBase::buildoption,
                  "In case of a gaussian output module, specifies the minimal sigma^2.");

    declareOption(ol, "gaussian_model_dl_slr",
                  &NnlmOnlineLearner::gaussian_model_dl_slr,
                  OptionBase::buildoption,
                  "In case of a gaussian output module with discriminant learning, this specifies the starting learning rate.");

    declareOption(ol, "gaussian_model_dl_dc",
                  &NnlmOnlineLearner::gaussian_model_dl_dc,
                  OptionBase::buildoption,
                  "In case of a gaussian output module with discriminant learning, this specifies the decrease constant.");

    // - Candidate set sizes
    declareOption(ol, "shared_candidates_size",
                  &NnlmOnlineLearner::shared_candidates_size,
                  OptionBase::buildoption,
                  "Number of candidates drawn from frequent words in the approximate discriminant cost evaluation.");

    declareOption(ol, "ngram_candidates_size",
                  &NnlmOnlineLearner::ngram_candidates_size,
                  OptionBase::buildoption,
                  "Number of candidates drawn from the context (using a bigram) in the approximate discriminant cost evaluation.");

    declareOption(ol, "self_candidates_size",
                  &NnlmOnlineLearner::self_candidates_size,
                  OptionBase::buildoption,
                  "Number of candidates drawn from the nnlm in the approximate discriminant cost evaluation (evaluated periodically). NOT IMPLEMENTED!!");

    // - Ngram (for evaluating ngram candidates) train set
    declareOption(ol, "ngram_train_set",
                  &NnlmOnlineLearner::ngram_train_set,
                  OptionBase::buildoption,
                  "Train set used for training the bigram that evaluates the set of candidate words used for normalization in the approximate discriminant cost (a ProcessSymbolicSequenceVMatrix; ONLY BIGRAMS).");

    // * Softmax specific *

    declareOption(ol, "sm_slr",
                  &NnlmOnlineLearner::sm_slr,
                  OptionBase::buildoption,
                  "Softmax layer start learning rate.");
    declareOption(ol, "sm_dc",
                  &NnlmOnlineLearner::sm_dc,
                  OptionBase::buildoption,
                  "Softmax layer decrease constant.");
    declareOption(ol, "sm_wd_l1",
                  &NnlmOnlineLearner::sm_wd_l1,
                  OptionBase::buildoption,
                  "Softmax layer L1 penalty factor.");
    declareOption(ol, "sm_wd_l2",
                  &NnlmOnlineLearner::sm_wd_l2,
                  OptionBase::buildoption,
                  "Softmax layer L2 penalty factor.");


    // *** Learnt Options ***

    declareOption(ol, "modules", &NnlmOnlineLearner::modules,
                  OptionBase::buildoption,
                  "Layers of the learner.");

    declareOption(ol, "output_modules", &NnlmOnlineLearner::output_modules,
                  OptionBase::buildoption,
                  "Output layers.");

    // TODO Are there missing things here?

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

// build
void NnlmOnlineLearner::build()
{
    inherited::build();
    build_();
}


// build_
void NnlmOnlineLearner::build_()
{
    cout << "NnlmOnlineLearner::build_()" << endl;

    if( !train_set )  {
        return;
    }

    // *** Sanity Checks ***
    // TODO check the build options listed below
    /*int word_representation_size
    int semantic_layer_size
    real wrl_slr;
    real wrl_dc;
    real wrl_wd_l1;
    real wrl_wd_l2;
    real sl_slr;
    real sl_dc;
    real sl_wd_l1;
    real sl_wd_l2;
    real gaussian_model_sigma2_min
    int shared_candidates_size;
    int ngram_candidates_size;
    int self_candidates_size;
    real sm_slr;
    real sm_dc;
    real sm_wd_l1;
    real sm_wd_l2;*/


    // *** Determine Model ***

    // * Model type *
    string mt = lowerstring( str_output_model );
    if( mt == "gaussian" || mt == "" )  {
        model_type = MODEL_TYPE_GAUSSIAN;
    } else if( mt == "softmax" )  {
        model_type = MODEL_TYPE_SOFTMAX;
    } else  {
        PLERROR( "'%s' model type is unknown.\n", mt.c_str() );
    }


    if( model_type == MODEL_TYPE_GAUSSIAN ) {

        // * Gaussian model cost *
        string gmc = lowerstring( str_gaussian_model_train_cost );
        if( gmc == "approx_discriminant" || gmc == "" )  {
            gaussian_model_cost = GAUSSIAN_COST_APPROX_DISCR;
        } else if( gmc == "non_discriminant" )  {
            gaussian_model_cost = GAUSSIAN_COST_NON_DISCR;
        } else if( gmc == "discriminant" )  {
            gaussian_model_cost = GAUSSIAN_COST_DISCR;
        } else  {
            PLERROR( "'%s' gaussian model train cost is unknown.\n", gmc.c_str() );
        }

        // * Gaussian model learning *
        string gml = lowerstring( str_gaussian_model_learning );
        if( gml == "non_discriminant" || gml == "" )  {
            gaussian_model_learning = GAUSSIAN_LEARNING_EMPIRICAL;
        } else if( gml == "discriminant" )  {
            gaussian_model_learning = GAUSSIAN_LEARNING_DISCR;
        } else  {
            PLERROR( "'%s' gaussian model learning is unknown.\n", gml.c_str() );
        }
    }


    // *** Vocabulary size ***

    // the train set's dictionary size, +1 for the 'OOV' tag (tag 0) and +1
    // for the 'missing' tag (tag 'dict_size+1')
    vocabulary_size = (train_set->getDictionary(0))->size()+2;

    if( verbosity > 0 ) {
        cout << "\tvocabulary_size = " << vocabulary_size << endl;
    }
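
    // Illustrative tag layout (inferred from the comment above and from
    // myGetExample below): with a dictionary of size 5,
    //   tag 0               -> 'OOV'
    //   tags 1..5           -> dictionary words
    //   tag 6 (= size + 1)  -> 'missing'
    // so vocabulary_size would be 7.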

    // Ensure MINIMAL dictionary coherence, ie size, with the ngram set
    if( model_type == MODEL_TYPE_GAUSSIAN ) {
        if( vocabulary_size != (ngram_train_set->getDictionary(0))->size()+2 )  {
            PLERROR("train_set and ngram_train_set have dictionaries of different sizes.\n");
        }
    }


    // *** Context size ***

    // The ProcessSymbolicSequenceVMatrix has only inputs; the last input is
    // used as the target.
    context_size = inputsize()-1;

    if( verbosity > 0 ) {
        cout << "\tcontext_size = " << context_size << endl;
    }
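
    // For example (hypothetical sizes): with inputsize() == 4, a row holds
    // [ w(t-3), w(t-2), w(t-1), w(t) ]; the first three columns are the
    // context and the last is the target, so context_size == 3.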


    // *** Build modules and output_module ***
    buildLayers();

    cout << "NnlmOnlineLearner::build_() - DONE!" << endl;
}


// buildLayers
void NnlmOnlineLearner::buildLayers()
{

    // *** Do we have to build the layers, or did we load them? ***

    if( nmodules <= 0 ) {

        //------------------------------------------
        // 1) Fixed part - up to the semantic layer
        //------------------------------------------
        nmodules = 3;
        modules.resize( nmodules );
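
        // Overview of the fixed part (sizes for the default 'wrl' input
        // layer, as configured below):
        //   values[0]  : context_size word tags
        //   modules[0] : NnlmWordRepresentationLayer
        //                -> context_size * word_representation_size units
        //   modules[1] : GradNNetLayerModule -> semantic_layer_size units
        //   modules[2] : TanhModule -> semantic_layer_size units
        //                (the "semantic layer" the output models sit on)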

        // *** First layer ***
        string ilm = lowerstring( str_input_model );
        if( ilm == "wrl" || ilm == "" )  {
            // *** Word representation layer ***
            PP< NnlmWordRepresentationLayer > p_wrl = new NnlmWordRepresentationLayer();

            p_wrl->input_size = context_size;
            p_wrl->output_size = context_size * word_representation_size;

            p_wrl->start_learning_rate = wrl_slr;
            p_wrl->decrease_constant = wrl_dc;
            //TODO
            //p_wrl->L1_penalty_factor = wrl_wd_l1;
            //p_wrl->L2_penalty_factor = wrl_wd_l2;
            p_wrl->vocabulary_size = vocabulary_size;
            p_wrl->word_representation_size = word_representation_size;
            p_wrl->context_size = context_size;
            p_wrl->random_gen = random_gen;

            modules[0] = p_wrl;

        } else if( ilm == "gnnl" )  {
            // *** GradNNetLayer as input layer ***
            PP< GradNNetLayerModule > p_nnl = new GradNNetLayerModule();

            // use context_size (inputsize() includes the target)
            p_nnl->input_size = context_size;
            p_nnl->output_size = context_size * word_representation_size;

            p_nnl->start_learning_rate = wrl_slr;
            p_nnl->decrease_constant = wrl_dc;
            p_nnl->L1_penalty_factor = wrl_wd_l1;
            p_nnl->L2_penalty_factor = wrl_wd_l2;

            p_nnl->init_weights_random_scale = sqrt( p_nnl->input_size );
            p_nnl->random_gen = random_gen;

            modules[0] = p_nnl;

        } else  {
            PLERROR( "'%s' input layer model is unknown.\n", ilm.c_str() );
        }


        // *** GradNNetLayer ***
        PP< GradNNetLayerModule > p_nnl = new GradNNetLayerModule();

        p_nnl->input_size = context_size * word_representation_size;
        p_nnl->output_size = semantic_layer_size;

        p_nnl->start_learning_rate = sl_slr;
        p_nnl->decrease_constant = sl_dc;
        p_nnl->L1_penalty_factor = sl_wd_l1;
        p_nnl->L2_penalty_factor = sl_wd_l2;
        p_nnl->init_weights_random_scale = 3.0*sqrt( p_nnl->input_size );
        p_nnl->random_gen = random_gen;

        modules[1] = p_nnl;


        // *** Tanh layer ***
        PP< TanhModule > p_thm = new TanhModule();

        p_thm->input_size = semantic_layer_size;
        p_thm->output_size = semantic_layer_size;

        modules[2] = p_thm;


        //------------------------------------------
        // 2) Variable part - over the semantic layer
        //------------------------------------------

        if( model_type == MODEL_TYPE_GAUSSIAN )  {

            output_nmodules = 1;
            output_modules.resize( output_nmodules );


            // *** NnlmOutputLayer ***
            PP< NnlmOutputLayer > p_nol = new NnlmOutputLayer();

            p_nol->input_size = semantic_layer_size;
            p_nol->output_size = 1;
            // the missing tag does NOT get an output (it is never the target)
            p_nol->target_cardinality = vocabulary_size-1;
            p_nol->sigma2min = gaussian_model_sigma2_min;
            p_nol->context_cardinality = vocabulary_size;
            // use the declared option instead of a hardcoded value
            p_nol->dl_start_learning_rate = gaussian_model_dl_slr;
            //TODO Set cost and learning
            //int gaussian_model_cost;
            //int gaussian_model_learning;

            output_modules[0] = p_nol;
            output_modules[0]->build();

        } else {

            output_nmodules = 2;
            output_modules.resize( output_nmodules );

            // *** GradNNetLayer ***
            PP< GradNNetLayerModule > p_sm_nnl = new GradNNetLayerModule();

            p_sm_nnl->input_size = semantic_layer_size;
            // the missing tag does NOT get an output (it is never the target)
            p_sm_nnl->output_size = vocabulary_size-1;

            p_sm_nnl->start_learning_rate = sm_slr;
            p_sm_nnl->decrease_constant = sm_dc;
            p_sm_nnl->L1_penalty_factor = sm_wd_l1;
            p_sm_nnl->L2_penalty_factor = sm_wd_l2;
            p_sm_nnl->init_weights_random_scale = 3.0*sqrt( p_sm_nnl->input_size );
            p_sm_nnl->random_gen = random_gen;

            output_modules[0] = p_sm_nnl;
            output_modules[0]->build();


            // *** Softmax ***
            output_modules[1] = new NLLErrModule();
            // the missing tag does NOT get an output (it is never the target)
            output_modules[1]->input_size = vocabulary_size-1;
            output_modules[1]->output_size = 1;

            output_modules[1]->build();

            //
            output_values.resize( 1 );
            output_gradients.resize( 1 );
            // TODO should improve this
            // +1 so we can add the target in the last spot
            output_values[0].resize( vocabulary_size );
            output_gradients[0].resize( vocabulary_size-1 );
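
            // Layout of output_values[0] (inferred from its use in
            // myGetExample and computeTrainCostsFromOutputs below):
            //   [ 0 .. vocabulary_size-2 ] : scores from the GradNNetLayer
            //   [ vocabulary_size-1 ]      : the current target's index,
            //                                read by the NLLErrModule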
        }
    }

    // *** Check layer size compatibilities, resize values and gradients, and build ***
    // TODO Right now we simply check up to the semantic layer. And we don't check compatibility
    // with context_size and word_representation_size and semantic_layer_size.

    // variables
    values.resize( nmodules+1 );
    gradients.resize( nmodules+1 );

    // first values will be "input" values
    int size = context_size;
    values[0].resize( size );
    gradients[0].resize( size );

    for( int i=0 ; i<nmodules ; i++ )
    {
        PP<OnlineLearningModule> p_module = modules[i];

        if( p_module->input_size != size )
        {
            PLWARNING( "NnlmOnlineLearner::buildLayers(): module '%d'\n"
                       "has an input size of '%d', but previous layer's output"
                       " size\n"
                       "is '%d'. Resizing module '%d'.\n",
                       i, p_module->input_size, size, i);
            p_module->input_size = size;
        }

        p_module->estimate_simpler_diag_hessian = true;

        p_module->build();

        size = p_module->output_size;
        values[i+1].resize( size );
        gradients[i+1].resize( size );
    }

    // *** Gaussian Model ***

    if( model_type == MODEL_TYPE_GAUSSIAN )  {

        // * Build candidates
        if( gaussian_model_cost == GAUSSIAN_COST_APPROX_DISCR )  {
            buildCandidates();
        }

        // * Set the output layer's candidates, learning method and cost
        PP<NnlmOutputLayer> p_nol;
        if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
        {
            PLERROR("NnlmOnlineLearner::buildLayers() - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
        }

        // TODO clean this
        // point to the same place
        p_nol->shared_candidates = shared_candidates;
        p_nol->candidates = candidates;

        // Set learning method - discriminant or non-discriminant
        p_nol->setLearning(gaussian_model_learning);

        // Set Cost
        if( gaussian_model_cost == GAUSSIAN_COST_APPROX_DISCR ) {
            p_nol->setCost(GAUSSIAN_COST_APPROX_DISCR);
        } else if( gaussian_model_cost == GAUSSIAN_COST_NON_DISCR ) {
            p_nol->setCost(GAUSSIAN_COST_NON_DISCR);
        } else { //GAUSSIAN_COST_DISCR
            p_nol->setCost(GAUSSIAN_COST_DISCR);
        }

        //evaluateGaussianCounts();
        //reevaluateGaussianParameters();
        // *

        // Not here, because forget will be called after and it resets mus and sigmas
        // Initialize mus and sigmas using 1 pass
        //reevaluateGaussianParameters();


        // ### Should only be evaluated once
        //p_nol->sumI << p_nol->test_sumI;
        //p_nol->s_sumI = p_nol->test_s_sumI;

    }


}

// buildCandidates
// TODO use higher order ngrams to build candidates. The present limitation is only on the candidates data structure.
void NnlmOnlineLearner::buildCandidates()
{
    if( model_type != MODEL_TYPE_GAUSSIAN )  {
        PLWARNING("NnlmOnlineLearner::buildCandidates() - model is not of gaussian type. Ignoring call.\n");
        return;
    }

    // *** Train ngram ***

    cout << "NnlmOnlineLearner::buildCandidates()" << endl;
    cout << "\ttraining ngram..." << endl;
    theNGram = new NGramDistribution();

    theNGram->n = ngram_train_set->inputsize();
    theNGram->smoothing = "no_smoothing";
    theNGram->nan_replace = true;
    theNGram->setTrainingSet( ngram_train_set );
    //theNGram->build(); Done in setTrainingSet

    theNGram->train();


    // *** Effective building ***

    cout << "\tbuilding candidates..." << endl;

    shared_candidates.resize( shared_candidates_size );
    candidates.resize( vocabulary_size );
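
    // Layout (inferred from the code below):
    //   shared_candidates : the most frequent word tags overall
    //   candidates[w]     : frequent bigram followers of word w that are not
    //                       already in shared_candidates
    // The approximate discriminant cost normalizes over the union of these
    // two sets instead of over the whole vocabulary.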

    std::vector< wordAndFreq > tmp;
    // temporary list containing the shared candidates
    list<int> l_tmp_shared_candidates;
    list<int>::iterator itr_tmp_shared_candidates;


    // * Determine the most frequent words, which become the shared_candidates
    TVec<int> unigram( 1 );
    TVec<int> unifreq( 1 );

    // wt means "word tag"
    // Note -> wt=vocabulary_size-1 corresponds to the (-1) tag in the NGramDistribution
    // we skip this tag, the 'missing' tag
    // NOTE Is this appropriate treatment?
    // I don't see how the missing values could occur anywhere except at the beginning, so yes.
    for(int wt=0; wt<vocabulary_size-1; wt++)  {
        unigram[0] = wt;
        unifreq = (theNGram->tree)->freq(unigram);
        tmp.push_back( wordAndFreq(wt, unifreq[0]) );
    }

    std::sort(tmp.begin(), tmp.end(), wordAndFreqGT);

    //cout << "These are the shared candidates:" << endl;

    // HACK we don't check whether itr has hit the end... it is unlikely that
    // vocabulary_size is smaller than shared_candidates_size
    std::vector< wordAndFreq >::iterator itr_vec;
    itr_vec=tmp.begin();
    for(int i=0; i< shared_candidates_size; i++) {

        cout << (train_set->getDictionary(0))->getSymbol( itr_vec->wordtag ) << "\t";

        shared_candidates[i] = itr_vec->wordtag;
        l_tmp_shared_candidates.push_back(itr_vec->wordtag);
        itr_vec++;
    }

    tmp.clear();

    cout << endl;


    // * Add the best candidates according to a bigram
    // wt means "word tag"
    // Note -> wt=vocabulary_size-1 corresponds to the (-1) tag in the NGramDistribution
    // we skip this tag, the 'missing' tag
    // NOTE Is this appropriate treatment?
    map<int, int> frequenciesCopy;
    map<int,int>::iterator itr;
    int n_candidates;

    for(int wt=-1; wt<vocabulary_size-1; wt++)  {

        // - fill list of candidates, then sort
        PP<SymbolNode> node = ((theNGram->tree)->getRoot())->child(wt);
        if(node)  {
            frequenciesCopy = node->getFrequencies();

            itr = frequenciesCopy.begin();
            while( itr != frequenciesCopy.end() ) {
                // -1 is the NGram's missing tag, our vocabulary_size-1 tag
                // Actually, we should not see it as a follower to anything except itself...
                if( itr->first != -1) {
                    tmp.push_back( wordAndFreq( itr->first, itr->second ) );
                } else  {
                    tmp.push_back( wordAndFreq( vocabulary_size-1, itr->second ) );
                }
                itr++;
            }
            std::sort(tmp.begin(), tmp.end(), wordAndFreqGT);

            // - resize candidates entry
            if( ngram_candidates_size < (int) tmp.size() )  {
                n_candidates = ngram_candidates_size;
            } else  {
                n_candidates = tmp.size();
            }

            if(wt!=-1)  {
                candidates[wt].resize( n_candidates );
            } else  {
                candidates[ vocabulary_size-1 ].resize( n_candidates );
            }

            // - fill candidates entry

            itr_vec=tmp.begin();
            for(int i=0; i< n_candidates; i++) {
                //cout << (train_set->getDictionary(0))->getSymbol( itr_vec->wordtag ) << "\t";

                // ONLY ADD IF NOT IN THE SHARED CANDIDATES
                // Search the list.
                itr_tmp_shared_candidates = find( l_tmp_shared_candidates.begin(), l_tmp_shared_candidates.end(), itr_vec->wordtag);

                // if not found -> add it
                if (itr_tmp_shared_candidates == l_tmp_shared_candidates.end())
                {
                    if( itr_vec->wordtag > vocabulary_size -1 )
                        cout << "NnlmOnlineLearner::buildCandidates() - problem " << itr_vec->wordtag <<endl;

                    if(wt!=-1)  {
                        candidates[wt][i] = itr_vec->wordtag;
                    } else  {
                        candidates[ vocabulary_size-1 ][i] = itr_vec->wordtag;
                    }
                // compensate for not adding this word
                } else  {
                    i--;
                    n_candidates--;
                }
                itr_vec++;
            }
            // compensate for not adding words
            if(wt!=-1)  {
                candidates[wt].resize( n_candidates );
            } else  {
                candidates[ vocabulary_size-1 ].resize( n_candidates );
            }

            tmp.clear();
        }
    }
    l_tmp_shared_candidates.clear();

}

// evaluateGaussianCounts
/*void NnlmOnlineLearner::evaluateGaussianCounts() const
{

    if( model_type != MODEL_TYPE_GAUSSIAN )  {
        PLWARNING( "NnlmOnlineLearner::evaluateGaussianCounts(): not a gaussian model. Ignoring call.\n");
        return;
    }

    Vec input( inputsize()-1 );
    Vec target( 1 );
    real weight;
    Vec output( outputsize() );   // the output of the semantic layer
    int nsamples = train_set->length();

    cout << "Evaluating gaussian counts..." << endl;

    PP<NnlmOutputLayer> p_nol;
    if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
    {
        PLERROR("NnlmOnlineLearner::evaluateGaussianCounts() - output_modules[0] is not an NnlmOutputLayer");
    }

    p_nol->resetClassCounts();

    // * Compute stats
    for( int sample=0 ; sample < nsamples ; sample++ )
    {
        myGetExample(train_set, sample, input, target, weight );

        p_nol->incrementClassCount( (int) target[0]);
    }

    // * Apply values
    p_nol->applyClassCounts();

}
*/
// reevaluateGaussianParameters
void NnlmOnlineLearner::reevaluateGaussianParameters() const
{
    if( model_type != MODEL_TYPE_GAUSSIAN )  {
        PLWARNING( "NnlmOnlineLearner::reevaluateGaussianParameters(): not a gaussian model. Ignoring call.\n");
        return;
    }

    cout << "Evaluating gaussian parameters..." << endl;

    Vec input( inputsize()-1 );
    Vec target( 1 );
    real weight;
    Vec output( outputsize() );   // the output of the semantic layer
    int nsamples = train_set->length();

    PP<NnlmOutputLayer> p_nol;
    if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
    {
        PLERROR("NnlmOnlineLearner::reevaluateGaussianParameters() - output_modules[0] is not an NnlmOutputLayer");
    }

    p_nol->resetAllClassVars();

    // * Compute stats
    for( int sample=0 ; sample < nsamples ; sample++ )
    {
        myGetExample(train_set, sample, input, target, weight );

        // * fprop
        computeOutput(input, output);

        //p_nol->setTarget( (int) target[0]);
        //p_nol->setContext( (int) input[ (inputsize()-2) ] );

        p_nol->updateClassVars((int) target[0], output);
    }

    // * Apply values
    p_nol->applyAllClassVars();

}

// makeDeepCopyFromShallowCopy
void NnlmOnlineLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(modules, copies);
    deepCopyField(values, copies);
    deepCopyField(gradients, copies);

    deepCopyField(output_modules, copies);
    deepCopyField(output_values, copies);
    deepCopyField(output_gradients, copies);

    // ### How about these?
    //ngram_train_set
    //theNGram
    //shared_candidates
    //candidates

}


// outputsize
int NnlmOnlineLearner::outputsize() const
{
    if( nmodules < 0 || values.length() <= nmodules )
        return -1;
    else
        return values[ nmodules ].length();
}

//--------------------------------------------------------------------------------------------------

// forget
void NnlmOnlineLearner::forget()
{
    inherited::forget();

    // reset inputs
    values[0].clear();
    gradients[0].clear();
    // reset modules and outputs
    for( int i=0 ; i<nmodules ; i++ )
    {
        modules[i]->forget();
        values[i+1].clear();
        gradients[i+1].clear();
    }

    if( model_type == MODEL_TYPE_SOFTMAX )  {
        output_values[0].clear();
        output_gradients[0].clear();
    }
    for( int i=0 ; i<output_nmodules; i++ )
    {
        output_modules[i]->forget();
    }

    stage = 0;
}

// myGetExample

// We had trouble interfacing with ProcessSymbolicSequenceVMatrix's getExample.
// In particular, a source matrix of inputsize 1, targetsize 0 and weightsize 0
// used in a ProcessSymbolicSequenceVMatrix with leftcontext 3 would return an
// input of size 4 and a target of size 0, even though its inputsize was set
// to 3 and its targetsize to 1.
//
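// For example (hypothetical numbers), with inputsize() == 4 and
// weightsize() == 0, a row [ 12, 5, 73, 8 ] is split into
//   input  = [ 12, 5, 73 ]  (the 3-word context)
//   target = [ 8 ]          (the next word)
// and weight defaults to 1.0.
//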
void NnlmOnlineLearner::myGetExample(const VMat& example_set, int& sample, Vec& input, Vec& target,
real& weight) const
{
    static Vec row;
    // the actual inputsize is (inputsize()-1) and targetsize() is 1
    row.resize( inputsize() + weightsize() );

    example_set->getRow( sample, row);

    input << row.subVec( 0, inputsize()-1 );
    target << row.subVec( inputsize()-1, 1 );
    weight = 1.0;
    if( weightsize() )  {
        weight = row[ inputsize() ];
    }

    // *** SHOULD BE DONE IN PRETREATMENT!!! -> but we have a ProcessSymbolicSequenceVMatrix...
    // * Replace nan in input by '(train_set->getDictionary(0))->size()+1',
    // the missing value tag
    for( int i=0 ; i < inputsize()-1 ; i++ ) {
        if( is_missing(input[i]) )  {
            input[i] = vocabulary_size - 1;
        }
    }
    // * Replace a 'nan' in the target by OOV
    // this nan should not be missing data (since the train_set is a
    // ProcessSymbolicSequenceVMatrix)
    // but the word "nan", ie "Mrs Nan said she would blabla"
    // *** Problem however - the current vocabulary is full for the train_set,
    // ie we train OOV on nan-word instances.
    // DO a pretreatment to replace Nan by *Nan* or something like it
    if( is_missing(target[0]) ) {
        target[0] = 0;
    }
    // *** SHOULD BE DONE IN PRETREATMENT!!!

    // set target for the nllerrmodule
    if( model_type == MODEL_TYPE_SOFTMAX )  {
        output_values[0][vocabulary_size-1]=target[0];
    }

}

// until the end

// train
void NnlmOnlineLearner::train()
{
    if (!initTrain())
        return;

    Vec input( inputsize()-1 );
    Vec target( 1 );
    real weight;
    Vec output( outputsize() );   // the output of the semantic layer
    Vec train_costs( getTrainCostNames().length() );
    Vec out_gradient(1,1); // the gradient wrt the cost is '1'
    Vec gradient( semantic_layer_size );
    int nsamples = train_set->length();

    // Initialize mus and sigmas using 1 pass
    reevaluateGaussianParameters();

    if( stage==0 && model_type == MODEL_TYPE_GAUSSIAN )  {
        PP<NnlmOutputLayer> p_nol;
        if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
        {
            PLERROR("NnlmOnlineLearner::train() - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
        }

        p_nol->computeEmpiricalLearningRateParameters();
    }
01051     if( model_type == MODEL_TYPE_GAUSSIAN )  {
01052 
01053         PP<NnlmOutputLayer> p_nol;
01054         if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
01055         {
01056             PLERROR("NnlmOnlineLearner::train() - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
01057         }
01058 
01059         p_nol->is_learning = true;
01060 
01061     }
01062 
01063 
01064 //---------------
01065 /*    PP<GradNNetLayerModule> p_gnn;
01066     if( !(p_gnn = dynamic_cast<GradNNetLayerModule*>( (OnlineLearningModule*) modules[1] ) ) )
01067     {
01068         PLERROR("NnlmOnlineLearner::train - modules[1] is not a GradNNetLayerModule");
01069     }
01070     p_gnn->printVariance();*/
01071 //---------------
01072 
01073     PP<ProgressBar> pb;
01074     if(report_progress) {
01075         pb = new ProgressBar("Training", nsamples);
01076     }
01077 
    // *** For stages ***
    for( ; stage < nstages ; stage++ )
    {

        if(report_progress) {
            cout << "*** Stage " << stage << " ***" << endl;
            //cout << "uniform_mixture_coeff " << output_modules[0]->umc << " " << 1 - output_modules[0]->umc<< endl;
        }

        if( model_type == MODEL_TYPE_GAUSSIAN )  {

            PP<NnlmOutputLayer> p_nol;
            if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
            {
                PLERROR("NnlmOnlineLearner::train() - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
            }

            cout << "global_mu " << p_nol->global_mu << endl;
            cout << "global_sigma2 " << p_nol->global_sigma2 << endl;
        }

        // * clear stats of previous epoch *
        train_stats->forget();

        // * for examples *
        for( int sample=0 ; sample < nsamples ; sample++ )
        {

            if(report_progress)
                pb->update(sample);

            // - Get example -
            myGetExample(train_set, sample, input, target, weight );

            // - Fixed part fprop -
            computeOutput(input, output);

            // - Variable part fprop - cost and gradient for this part -
            // (we don't want to duplicate some computations in the gaussian model's gradient evaluation)
            // In the gaussian case, gradients[nmodules] is computed here.
            computeTrainCostsFromOutputs(input, output, target, train_costs );

            // - bpropUpdate -

            // Variable part
            if( model_type == MODEL_TYPE_GAUSSIAN )  {

                PP<NnlmOutputLayer> p_nol;
                if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
                {
                    PLERROR("NnlmOnlineLearner::train() - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
                }

                if( gaussian_model_cost == GAUSSIAN_COST_APPROX_DISCR )  {
                    output_modules[0]->bpropUpdate( output, train_costs.subVec(1,1), out_gradient );
                    gradients[nmodules] << p_nol->ad_gradient;
                } else {  //if( gaussian_model_cost == GAUSSIAN_COST_NON_DISCR )
                    output_modules[0]->bpropUpdate( output, train_costs.subVec(0,1), out_gradient );
                    gradients[nmodules] << p_nol->nd_gradient;
                }

            } else  {
                output_modules[1]->bpropUpdate( output_values[0], train_costs, output_gradients[0], out_gradient );
                output_modules[0]->bpropUpdate( output, output_values[0].subVec( 0, vocabulary_size-1 ), gradients[nmodules], output_gradients[0] );
            }

            // Fixed (common to both models) part
            for( int i=nmodules-1 ; i>0 ; i-- ) {
                modules[i]->bpropUpdate( values[i], values[i+1], gradients[i], gradients[i+1] );
            }
            modules[0]->bpropUpdate( values[0], values[1], gradients[1] );
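
            // Note: the first module receives no gradient vector for its
            // input; presumably values[0] holds discrete word tags, so there
            // is nothing to propagate a gradient to.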

            // - Update stats -
            train_stats->update( train_costs );

        }// * for examples - END

        train_stats->finalize(); // finalize statistics for this epoch

        // Re-evaluate mus and sigmas using 1 pass
        reevaluateGaussianParameters();

    }// *** For stages - END

    if( model_type == MODEL_TYPE_GAUSSIAN )  {

        PP<NnlmOutputLayer> p_nol;
        if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
        {
            PLERROR("NnlmOnlineLearner::train() - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
        }

        p_nol->is_learning = false;
    }

}


// test
void NnlmOnlineLearner::test(VMat testset, PP<VecStatsCollector> test_stats,
                    VMat testoutputs, VMat testcosts) const
{

    Vec input( inputsize()-1 );
    Vec output( outputsize() );
    Vec target( 1 );
    real weight;
    Vec test_costs( getTestCostNames().length() );
    real entropy = 0.0;
    real perplexity = 0.0;
    int nsamples = testset->length();

    // * Empty test set: we give -1 cost arbitrarily (and return, to avoid
    // dividing by zero when computing the mean entropy below).
    if (nsamples == 0) {
        test_costs.fill(-1);
        test_stats->update(test_costs);
        return;
    }

    if( stage == 0 )  {
        // Initialize mus and sigmas using 1 pass
        reevaluateGaussianParameters();
    }


    // * TODO Should we do this?
    //reevaluateGaussianParameters();

    if( model_type == MODEL_TYPE_GAUSSIAN )  {

        PP<NnlmOutputLayer> p_nol;
        if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
        {
            PLERROR("NnlmOnlineLearner::test() - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
        }

        cout << "global_mu " << p_nol->global_mu << endl;
        cout << "global_sigma2 " << p_nol->global_sigma2 << endl;
    }


    PP<ProgressBar> pb;
    if(report_progress)
        pb = new ProgressBar("Testing learner",nsamples);

    for( int sample=0 ; sample < nsamples ; sample++ )
    {
        myGetExample(testset, sample, input, target, weight );

        // Always call computeOutputAndCosts, since this is better
        // behaved with stateful learners
        computeOutputAndCosts(input, target, output, test_costs);

        if(testoutputs)
            testoutputs->putOrAppendRow(sample,output);

        if(testcosts)
            testcosts->putOrAppendRow(sample, test_costs);

        if(test_stats)
            test_stats->update(test_costs,weight);

        if(report_progress)
            pb->update(sample);

        entropy += test_costs[0];

        // Do some outputting
        if( sample < 50 ) {
            cout << "---> ";
            for( int i=0; i<inputsize()-1; i++)  {
                if( (int)input[i] == vocabulary_size - 1) {
                    cout << "\\missing\\ ";
                } else  {
                    cout << (testset->getDictionary(0))->getSymbol( (int)input[i] ) << " ";
                }
            }
            cout << "\t\t " << (testset->getDictionary(0))->getSymbol( (int)target[0] ) << " p(t|r) " << safeexp( - test_costs[0] ) << endl;

            if( model_type == MODEL_TYPE_GAUSSIAN )  {
                PP<NnlmOutputLayer> p_nol;
                if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
                {
                    PLERROR("NnlmOnlineLearner::test - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
                }
                Vec candidates, probabilities;
                p_nol->getBestCandidates(output, candidates, probabilities);
                for(int i=0; i<candidates.size(); i++)  {
                    cout << "\t" << (testset->getDictionary(0))->getSymbol( (int)candidates[i] ) << " " << probabilities[i] << endl;
                }
            }
        }
        // Do some outputting - END

    }

    entropy /= nsamples;
    perplexity = safeexp(entropy);

    cout << "entropy: " << entropy << " perplexity " << perplexity << endl;
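
    // 'entropy' here is the mean NLL in nats, so perplexity = exp(entropy).
    // For instance, a mean NLL of ln(100) ~= 4.605 nats corresponds to a
    // perplexity of 100, i.e. the model is on average as uncertain as a
    // uniform choice among 100 words.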

}


// computeOutput
void NnlmOnlineLearner::computeOutput(const Vec& input, Vec& output) const
{
//cout << "************************************" << endl;

    // fprop
    values[0] << input;
    for( int i=0 ; i<nmodules ; i++ ) {
        modules[i]->fprop( values[i], values[i+1] );

//cout << "-= " << i << " =-" << endl;
//cout << values[i] << endl;
    }
//cout << "-= " << nmodules << " =-" << endl;
//cout << values[ nmodules ] << endl;

    //
    output.resize( outputsize() );
    output << values[ nmodules ];

}

// computeTrainCostsFromOutputs
// Compute costs. In the gaussian case, also compute the gradient wrt the output.
void NnlmOnlineLearner::computeTrainCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{

    if( model_type == MODEL_TYPE_GAUSSIAN )  {

        PP<NnlmOutputLayer> p_nol;
        if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
        {
            PLERROR("NnlmOnlineLearner::computeTrainCostsFromOutputs - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
        }

        p_nol->setTarget( (int) target[0] );
        p_nol->setContext( (int) input[ (int) (inputsize()-2) ] );

        p_nol->fprop( output, costs );

    } else  {
        Vec example_cost(1);
        // don't give the target to the GradNNetLayerModule
        Vec nnet_out(vocabulary_size-1);
/*
    Vec out_tgt = output.copy();
    out_tgt.append( target );
    for( int i=0 ; i<ncosts ; i++ )
    {
        Vec cost(1);
        cost_modules[i]->fprop( out_tgt, cost );
        costs[i] = cost[0];
    }

*/
        //output_modules[0]->fprop( output, output_values[0].subVec( 0, vocabulary_size-1 ) );

        // output_values[0][vocabulary_size-1] contains the target index, set in myGetExample
        output_modules[0]->fprop( output, nnet_out );
        output_values[0].subVec( 0, vocabulary_size-1 ) << nnet_out;
        output_modules[1]->fprop( output_values[0], example_cost);

        costs[0] = example_cost[0];
    }

}


// computeCostsFromOutputs
// Compute the costs from an *already* computed output.
// TODO should not iterate over the vocabulary. Properly set the output's state and call fprop.
void NnlmOnlineLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{
    if( model_type == MODEL_TYPE_GAUSSIAN )  {

        PP<NnlmOutputLayer> p_nol;
        if( !(p_nol = dynamic_cast<NnlmOutputLayer*>( (OnlineLearningModule*) output_modules[0] ) ) )
        {
            PLERROR("NnlmOnlineLearner::computeCostsFromOutputs - MODEL_TYPE_GAUSSIAN but output_modules[0] is not an NnlmOutputLayer");
        }

        p_nol->setCost(GAUSSIAN_COST_DISCR);
        p_nol->setTarget( (int)target[0] );
        p_nol->fprop( output, costs);

        // Re-Set Cost
        if( gaussian_model_cost == GAUSSIAN_COST_APPROX_DISCR ) {
            p_nol->setCost(GAUSSIAN_COST_APPROX_DISCR);
        } else  { //GAUSSIAN_COST_NON_DISCR
            p_nol->setCost(GAUSSIAN_COST_NON_DISCR);
        }

    } else  {
        Vec example_cost(1);

        Vec nnet_out(vocabulary_size-1);

        output_modules[0]->fprop( output, nnet_out );
        output_values[0].subVec( 0, vocabulary_size-1 ) << nnet_out;
        // output_values[0][vocabulary_size-1] contains the target index, set in myGetExample
        output_modules[1]->fprop( output_values[0], example_cost);

        costs[0] = example_cost[0];
    }

}

// getTestCostNames
TVec<string> NnlmOnlineLearner::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by
    // getTrainCostNames).
    TVec<string> ret;
    ret.resize(1);
    ret[0] = "NLL";
    return ret;
}


// getTrainCostNames
TVec<string> NnlmOnlineLearner::getTrainCostNames() const
{
    // Return the names of the objective costs that the train method computes
    // and for which it updates the VecStatsCollector train_stats
    // (these may or may not be exactly the same as what's returned by
    // getTestCostNames).
    TVec<string> ret;

    if( model_type == MODEL_TYPE_GAUSSIAN )  {
        ret.resize(2);
        ret[0] = "non_discriminant";
        ret[1] = "approx_discriminant";
    } else  {
        ret.resize(1);
        ret[0] = "NLL";
    }

    return ret;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :