DynamicallyLinkedRBMsModel.cc
00001 // -*- C++ -*-
00002 
00003 // DynamicallyLinkedRBMsModel.cc
00004 //
00005 // Copyright (C) 2006 Stanislas Lauly
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Stanislas Lauly
00036 
00040 #define PL_LOG_MODULE_NAME "DynamicallyLinkedRBMsModel"
00041 #include <plearn/io/pl_log.h>
00042 
00043 #include "DynamicallyLinkedRBMsModel.h"
00044 #include <plearn/math/plapack.h>
00045 
00046 // - commit mse
00047 // - add a denoising recurrent net. Two possibilities:
00048 //   1) add noise to the input, and reconstruct the targets with possibly
00049 //      different weights
00050 //     * a denoising_target_layers_weights option (that's where the input goes)
00051 //     * a version of clamp_units that adds the noise
00052 //   2) reconstruct the input directly (without a second hidden layer)
00053 //     * again, clamp_units adds the noise
00054 //     * an option saying which part of the input to reconstruct, and code
00055 //       to block the gradient that must not flow through (not very clean,
00056 //       but fine...)
00057 //     * an option giving the reconstruction connections
00058 //     * code to train the hidden_connections separately (if present)
00059 // - could compute the gradient of the denoising recurrent net at the same
00060 //   time as the "fine-tuning" gradient
00061 // - add dynamic_activations_list and use it in recurrent_update
00062 
00063 
00064 namespace PLearn {
00065 using namespace std;
00066 
00067 PLEARN_IMPLEMENT_OBJECT(
00068     DynamicallyLinkedRBMsModel,
00069     "Model made of RBMs linked through time",
00070     "At each time step, the hidden layer combines the current input with its value at the previous time step and predicts one or more target layers."
00071     );
00072 
00073 
00074 DynamicallyLinkedRBMsModel::DynamicallyLinkedRBMsModel() :
00075     //rbm_learning_rate( 0.01 ),
00076     recurrent_net_learning_rate( 0.01),
00077     use_target_layers_masks( false ),
00078     end_of_sequence_symbol( -1000 )
00079     //rbm_nstages( 0 ),
00080 {
00081     random_gen = new PRandom();
00082 }
00083 
00084 void DynamicallyLinkedRBMsModel::declareOptions(OptionList& ol)
00085 {
00086 //    declareOption(ol, "rbm_learning_rate", &DynamicallyLinkedRBMsModel::rbm_learning_rate,
00087 //                  OptionBase::buildoption,
00088 //                  "The learning rate used during RBM contrastive "
00089 //                  "divergence learning phase.\n");
00090 
00091     declareOption(ol, "recurrent_net_learning_rate", 
00092                   &DynamicallyLinkedRBMsModel::recurrent_net_learning_rate,
00093                   OptionBase::buildoption,
00094                   "The learning rate used during the recurrent phase.\n");
00095 
00096 //    declareOption(ol, "rbm_nstages", &DynamicallyLinkedRBMsModel::rbm_nstages,
00097 //                  OptionBase::buildoption,
00098 //                  "Number of epochs for rbm phase.\n");
00099 
00100 
00101     declareOption(ol, "target_layers_weights", 
00102                   &DynamicallyLinkedRBMsModel::target_layers_weights,
00103                   OptionBase::buildoption,
00104                   "The training weight of each target layer.\n");
00105 
00106     declareOption(ol, "use_target_layers_masks", 
00107                   &DynamicallyLinkedRBMsModel::use_target_layers_masks,
00108                   OptionBase::buildoption,
00109                   "Indication that a mask indicating which target to predict\n"
00110                   "is present in the input part of the VMatrix dataset.\n");
00111 
00112     declareOption(ol, "end_of_sequence_symbol", 
00113                   &DynamicallyLinkedRBMsModel::end_of_sequence_symbol,
00114                   OptionBase::buildoption,
00115                   "Value of the first input component that marks the end "
00116                   "of a sequence.\n");
00117 
00118     declareOption(ol, "input_layer", &DynamicallyLinkedRBMsModel::input_layer,
00119                   OptionBase::buildoption,
00120                   "The input layer of the model.\n");
00121 
00122     declareOption(ol, "target_layers", &DynamicallyLinkedRBMsModel::target_layers,
00123                   OptionBase::buildoption,
00124                   "The target layers of the model.\n");
00125 
00126     declareOption(ol, "hidden_layer", &DynamicallyLinkedRBMsModel::hidden_layer,
00127                   OptionBase::buildoption,
00128                   "The hidden layer of the model.\n");
00129 
00130     declareOption(ol, "hidden_layer2", &DynamicallyLinkedRBMsModel::hidden_layer2,
00131                   OptionBase::buildoption,
00132                   "The second hidden layer of the model (optional).\n");
00133 
00134     declareOption(ol, "dynamic_connections", 
00135                   &DynamicallyLinkedRBMsModel::dynamic_connections,
00136                   OptionBase::buildoption,
00137                   "The RBMConnection linking the first hidden layer to "
00138                   "itself, through time (optional).\n");
00139 
00140     declareOption(ol, "hidden_connections", 
00141                   &DynamicallyLinkedRBMsModel::hidden_connections,
00142                   OptionBase::buildoption,
00143                   "The RBMConnection between the first and second "
00144                   "hidden layers (optional).\n");
00145 
00146     declareOption(ol, "input_connections", 
00147                   &DynamicallyLinkedRBMsModel::input_connections,
00148                   OptionBase::buildoption,
00149                   "The RBMConnection from input_layer to hidden_layer.\n");
00150 
00151     declareOption(ol, "target_connections", 
00152                   &DynamicallyLinkedRBMsModel::target_connections,
00153                   OptionBase::buildoption,
00154                   "The RBMConnections from the last hidden layer to each target layer.\n");
00155 
00156     /*
00157     declareOption(ol, "", 
00158                   &DynamicallyLinkedRBMsModel::,
00159                   OptionBase::buildoption,
00160                   "");
00161     */
00162 
00163 
00164     declareOption(ol, "target_layers_n_of_target_elements", 
00165                   &DynamicallyLinkedRBMsModel::target_layers_n_of_target_elements,
00166                   OptionBase::learntoption,
00167                   "Number of elements in the target part of a VMatrix associated\n"
00168                   "to each target layer.\n");
00169 
00170     declareOption(ol, "input_symbol_sizes", 
00171                   &DynamicallyLinkedRBMsModel::input_symbol_sizes,
00172                   OptionBase::learntoption,
00173                   "Number of symbols for each symbolic field of train_set.\n");
00174 
00175     declareOption(ol, "target_symbol_sizes", 
00176                   &DynamicallyLinkedRBMsModel::target_symbol_sizes,
00177                   OptionBase::learntoption,
00178                   "Number of symbols for each symbolic field of the target part of train_set.\n");
00179 
00180     /*
00181     declareOption(ol, "", &DynamicallyLinkedRBMsModel::,
00182                   OptionBase::learntoption,
00183                   "");
00184      */
00185 
00186     // Now call the parent class' declareOptions
00187     inherited::declareOptions(ol);
00188 }
00189 
00190 void DynamicallyLinkedRBMsModel::build_()
00191 {
00192     // ### This method should do the real building of the object,
00193     // ### according to set 'options', in *any* situation.
00194     // ### Typical situations include:
00195     // ###  - Initial building of an object from a few user-specified options
00196     // ###  - Building of a "reloaded" object: i.e. from the complete set of
00197     // ###    all serialised options.
00198     // ###  - Updating or "re-building" of an object after a few "tuning"
00199     // ###    options have been modified.
00200     // ### You should assume that the parent class' build_() has already been
00201     // ### called.
00202 
00203     MODULE_LOG << "build_() called" << endl;
00204 
00205     if(train_set)
00206     {
00207         PLASSERT( target_layers_weights.length() == target_layers.length() );
00208         PLASSERT( target_connections.length() == target_layers.length() );
00209         PLASSERT( target_layers.length() > 0 );
00210         PLASSERT( input_layer );
00211         PLASSERT( hidden_layer );
00212         PLASSERT( input_connections );
00213 
00214         // Parsing symbols in input
00215         int input_layer_size = 0;
00216         input_symbol_sizes.resize(0);
00217         PP<Dictionary> dict;
00218         int inputsize_without_masks = inputsize() 
00219             - ( use_target_layers_masks ? targetsize() : 0 );
00220         for(int i=0; i<inputsize_without_masks; i++)
00221         {
00222             dict = train_set->getDictionary(i);
00223             if(dict)
00224             {
00225                 if( dict->size() == 0 )
00226                     PLERROR("DynamicallyLinkedRBMsModel::build_(): dictionary "
00227                         "of field %d is empty", i);
00228                 input_symbol_sizes.push_back(dict->size());
00229                 // Adjust size to include one-hot vector
00230                 input_layer_size += dict->size();
00231             }
00232             else
00233             {
00234                 input_symbol_sizes.push_back(-1);
00235                 input_layer_size++;
00236             }
00237         }
00238 
00239         if( input_layer->size != input_layer_size )
00240             PLERROR("DynamicallyLinkedRBMsModel::build_(): input_layer->size %d "
00241                     "should be %d", input_layer->size, input_layer_size);
00242 
00243         // Parsing symbols in target
00244         int tar_layer = 0;
00245         int tar_layer_size = 0;
00246         target_symbol_sizes.resize(target_layers.length());
00247         for( int layer=0; layer<target_layers.length(); 
00248              layer++ )
00249             target_symbol_sizes[layer].resize(0);
00250         target_layers_n_of_target_elements.resize( targetsize() );
00251         target_layers_n_of_target_elements.clear();
00252 
00253         for( int tar=0; tar<targetsize(); tar++)
00254         {
00255             if( tar_layer >= target_layers.length() )
00256                 PLERROR("DynamicallyLinkedRBMsModel::build_(): target layers "
00257                         "do not cover all targets.");
00258 
00259             dict = train_set->getDictionary(tar+inputsize());
00260             if(dict)
00261             {
00262                 if( use_target_layers_masks )
00263                     PLERROR("DynamicallyLinkedRBMsModel::build_(): masks for "
00264                             "symbolic targets is not implemented.");
00265                 if( dict->size() == 0 )
00266                     PLERROR("DynamicallyLinkedRBMsModel::build_(): dictionary "
00267                             "of field %d is empty", tar);
00268 
00269                 target_symbol_sizes[tar_layer].push_back(dict->size());
00270                 target_layers_n_of_target_elements[tar_layer]++;
00271                 tar_layer_size += dict->size();
00272             }
00273             else
00274             {
00275                 target_symbol_sizes[tar_layer].push_back(-1);
00276                 target_layers_n_of_target_elements[tar_layer]++;
00277                 tar_layer_size++;
00278             }
00279 
00280             if( target_layers[tar_layer]->size == tar_layer_size )
00281             {
00282                 tar_layer++;
00283                 tar_layer_size = 0;
00284             }
00285         }
00286 
00287         if( tar_layer != target_layers.length() )
00288             PLERROR("DynamicallyLinkedRBMsModel::build_(): target layers "
00289                     "do not cover all targets.");
00290 
00291 
00292         // Building weights and layers
00293         if( !input_layer->random_gen )
00294         {
00295             input_layer->random_gen = random_gen;
00296             input_layer->forget();
00297         }
00298 
00299         if( !hidden_layer->random_gen )
00300         {
00301             hidden_layer->random_gen = random_gen;
00302             hidden_layer->forget();
00303         }
00304 
00305         input_connections->down_size = input_layer->size;
00306         input_connections->up_size = hidden_layer->size;
00307         if( !input_connections->random_gen )
00308         {
00309             input_connections->random_gen = random_gen;
00310             input_connections->forget();
00311         }
00312         input_connections->build();
00313 
00314 
00315         if( dynamic_connections )
00316         {
00317             dynamic_connections->down_size = hidden_layer->size;
00318             dynamic_connections->up_size = hidden_layer->size;
00319             if( !dynamic_connections->random_gen )
00320             {
00321                 dynamic_connections->random_gen = random_gen;
00322                 dynamic_connections->forget();
00323             }
00324             dynamic_connections->build();
00325         }
00326 
00327         if( hidden_layer2 )
00328         {
00329             if( !hidden_layer2->random_gen )
00330             {
00331                 hidden_layer2->random_gen = random_gen;
00332                 hidden_layer2->forget();
00333             }
00334 
00335             PLASSERT( hidden_connections );
00336 
00337             hidden_connections->down_size = hidden_layer->size;
00338             hidden_connections->up_size = hidden_layer2->size;
00339             if( !hidden_connections->random_gen )
00340             {
00341                 hidden_connections->random_gen = random_gen;
00342                 hidden_connections->forget();
00343             }
00344             hidden_connections->build();
00345         }
00346 
00347         for( int tar_layer = 0; tar_layer < target_layers.length(); tar_layer++ )
00348         {
00349             PLASSERT( target_layers[tar_layer] );
00350             PLASSERT( target_connections[tar_layer] );
00351 
00352             if( !target_layers[tar_layer]->random_gen )
00353             {
00354                 target_layers[tar_layer]->random_gen = random_gen;
00355                 target_layers[tar_layer]->forget();
00356             }
00357 
00358             if( hidden_layer2 )
00359                 target_connections[tar_layer]->down_size = hidden_layer2->size;
00360             else
00361                 target_connections[tar_layer]->down_size = hidden_layer->size;
00362 
00363             target_connections[tar_layer]->up_size = target_layers[tar_layer]->size;
00364             if( !target_connections[tar_layer]->random_gen )
00365             {
00366                 target_connections[tar_layer]->random_gen = random_gen;
00367                 target_connections[tar_layer]->forget();
00368             }
00369             target_connections[tar_layer]->build();
00370         }
00371 
00372     }
00373 }
00374 
00375 // ### Nothing to add here, simply calls build_
00376 void DynamicallyLinkedRBMsModel::build()
00377 {
00378     inherited::build();
00379     build_();
00380 }
00381 
00382 
00383 void DynamicallyLinkedRBMsModel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00384 {
00385     inherited::makeDeepCopyFromShallowCopy(copies);
00386 
00387     deepCopyField( input_layer, copies);
00388     deepCopyField( target_layers , copies);
00389     deepCopyField( hidden_layer, copies);
00390     deepCopyField( hidden_layer2 , copies);
00391     deepCopyField( dynamic_connections , copies);
00392     deepCopyField( hidden_connections , copies);
00393     deepCopyField( input_connections , copies);
00394     deepCopyField( target_connections , copies);
00395     deepCopyField( target_layers_n_of_target_elements, copies);
00396     deepCopyField( input_symbol_sizes, copies);
00397     deepCopyField( target_symbol_sizes, copies);
00398     
00399 
00400     deepCopyField( bias_gradient , copies);
00401     deepCopyField( visi_bias_gradient , copies);
00402     deepCopyField( hidden_gradient , copies);
00403     deepCopyField( hidden_temporal_gradient , copies);
00404     deepCopyField( hidden_list , copies);
00405     deepCopyField( hidden_act_no_bias_list , copies);
00406     deepCopyField( hidden2_list , copies);
00407     deepCopyField( hidden2_act_no_bias_list , copies);
00408     deepCopyField( target_prediction_list , copies);
00409     deepCopyField( target_prediction_act_no_bias_list , copies);
00410     deepCopyField( input_list , copies);
00411     deepCopyField( targets_list , copies);
00412     deepCopyField( nll_list , copies);
00413     deepCopyField( masks_list , copies);
00414     deepCopyField( dynamic_act_no_bias_contribution, copies);
00415 
00416 
00417     // deepCopyField(, copies);
00418 
00419     //PLERROR("DynamicallyLinkedRBMsModel::makeDeepCopyFromShallowCopy(): "
00420     //"not implemented yet");
00421 }
00422 
00423 
00424 int DynamicallyLinkedRBMsModel::outputsize() const
00425 {
00426     int out_size = 0;
00427     for( int i=0; i<target_layers.length(); i++ )
00428         out_size += target_layers[i]->size;
00429     return out_size;
00430 }
00431 
00432 void DynamicallyLinkedRBMsModel::forget()
00433 {
00434     inherited::forget();
00435 
00436     input_layer->forget();
00437     hidden_layer->forget();
00438     input_connections->forget();
00439     if( dynamic_connections )
00440         dynamic_connections->forget();
00441     if( hidden_layer2 )
00442     {
00443         hidden_layer2->forget();
00444         hidden_connections->forget();
00445     }
00446 
00447     for( int i=0; i<target_layers.length(); i++ )
00448     {
00449         target_layers[i]->forget();
00450         target_connections[i]->forget();
00451     }
00452 
00453     stage = 0;
00454 }
00455 
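// Performs the recurrent training phase. Each row of train_set is one time
// step of a sequence, and a row whose first input component equals
// end_of_sequence_symbol marks the end of a sequence. The forward pass is
// accumulated over the sequence (through input_connections, the optional
// dynamic_connections and the target connections), the per-target NLL is
// summed, and recurrent_update() backpropagates through time at each
// sequence end.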
00456 void DynamicallyLinkedRBMsModel::train()
00457 {
00458     MODULE_LOG << "train() called " << endl;
00459 
00460     Vec input( inputsize() );
00461     Vec target( targetsize() );
00462     real weight = 0; // Unused
00463     Vec train_costs( getTrainCostNames().length() );
00464     train_costs.clear();
00465     Vec train_n_items( getTrainCostNames().length() );
00466 
00467     if( !initTrain() )
00468     {
00469         MODULE_LOG << "train() aborted" << endl;
00470         return;
00471     }
00472 
00473     ProgressBar* pb = 0;
00474 
00475     // clear stats of previous epoch
00476     train_stats->forget();
00477 
00478 
00479     /***** RBM training phase *****/
00480 //    if(rbm_stage < rbm_nstages)
00481 //    {
00482 //    }
00483 
00484 
00485     /***** Recurrent phase *****/
00486     if( stage >= nstages )
00487         return;
00488 
00489     if( stage < nstages )
00490     {        
00491 
00492         MODULE_LOG << "Training the whole model" << endl;
00493 
00494         int init_stage = stage;
00495         //int end_stage = max(0,nstages-(rbm_nstages + dynamic_nstages));
00496         int end_stage = nstages;
00497 
00498         MODULE_LOG << "  stage = " << stage << endl;
00499         MODULE_LOG << "  end_stage = " << end_stage << endl;
00500         MODULE_LOG << "  recurrent_net_learning_rate = " << recurrent_net_learning_rate << endl;
00501 
00502         if( report_progress && stage < end_stage )
00503             pb = new ProgressBar( "Recurrent training phase of "+classname(),
00504                                   end_stage - init_stage );
00505 
00506         setLearningRate( recurrent_net_learning_rate );
00507 
00508         int ith_sample_in_sequence = 0;
00509         int inputsize_without_masks = inputsize() 
00510             - ( use_target_layers_masks ? targetsize() : 0 );
00511         int sum_target_elements = 0;
00512         while(stage < end_stage)
00513         {
00514 /*
00515                 TMat<real> U,V;//////////crap James
00516                 TVec<real> S;
00517                 U.resize(hidden_layer->size,hidden_layer->size);
00518                 V.resize(hidden_layer->size,hidden_layer->size);
00519                 S.resize(hidden_layer->size);
00520                 U << dynamic_connections->weights;
00521                 
00522                 SVD(U,dynamic_connections->weights,S,V);
00523                 S.fill(-0.5);
00524                 productScaleAcc(dynamic_connections->bias,dynamic_connections->weights,S,1,0);
00525 */
00526             train_costs.clear();
00527             train_n_items.clear();
00528             for(int sample=0 ; sample<train_set->length() ; sample++ )
00529             {
00530                 train_set->getExample(sample, input, target, weight);
00531 
00532                 if( fast_exact_is_equal(input[0],end_of_sequence_symbol) )
00533                 {
00534                     //update
00535                     recurrent_update();
00536                     
00537                     ith_sample_in_sequence = 0;
00538                     hidden_list.resize(0);
00539                     hidden_act_no_bias_list.resize(0);
00540                     hidden2_list.resize(0);
00541                     hidden2_act_no_bias_list.resize(0);
00542                     target_prediction_list.resize(0);
00543                     target_prediction_act_no_bias_list.resize(0);
00544                     input_list.resize(0);
00545                     targets_list.resize(0);
00546                     nll_list.resize(0,0);
00547                     masks_list.resize(0);
00548                     continue;
00549                 }
00550 
00551                 // Resize internal variables
00552                 hidden_list.resize(ith_sample_in_sequence+1);
00553                 hidden_act_no_bias_list.resize(ith_sample_in_sequence+1);
00554                 if( hidden_layer2 )
00555                 {
00556                     hidden2_list.resize(ith_sample_in_sequence+1);
00557                     hidden2_act_no_bias_list.resize(ith_sample_in_sequence+1);
00558                 }
00559                  
00560                 input_list.resize(ith_sample_in_sequence+1);
00561                 input_list[ith_sample_in_sequence].resize(input_layer->size);
00562 
00563                 targets_list.resize( target_layers.length() );
00564                 target_prediction_list.resize( target_layers.length() );
00565                 target_prediction_act_no_bias_list.resize( target_layers.length() );
00566                 for( int tar=0; tar < target_layers.length(); tar++ )
00567                 {
00568                     if( !fast_exact_is_equal(target_layers_weights[tar],0) )
00569                     {                        
00570                         targets_list[tar].resize( ith_sample_in_sequence+1);
00571                         targets_list[tar][ith_sample_in_sequence].resize( 
00572                             target_layers[tar]->size);
00573                         target_prediction_list[tar].resize(
00574                             ith_sample_in_sequence+1);
00575                         target_prediction_act_no_bias_list[tar].resize(
00576                             ith_sample_in_sequence+1);
00577                     }
00578                 }
00579                 nll_list.resize(ith_sample_in_sequence+1,target_layers.length());
00580                 if( use_target_layers_masks )
00581                 {
00582                     masks_list.resize( target_layers.length() );
00583                     for( int tar=0; tar < target_layers.length(); tar++ )
00584                         if( !fast_exact_is_equal(target_layers_weights[tar],0) )
00585                             masks_list[tar].resize( ith_sample_in_sequence+1 );
00586                 }
00587 
00588                 // Forward propagation
00589 
00590                 // Fetch right representation for input
00591                 clamp_units(input.subVec(0,inputsize_without_masks),
00592                             input_layer,
00593                             input_symbol_sizes);                
00594                 input_list[ith_sample_in_sequence] << input_layer->expectation;
00595 
00596                 // Fetch right representation for target
00597                 sum_target_elements = 0;
00598                 for( int tar=0; tar < target_layers.length(); tar++ )
00599                 {
00600                     if( !fast_exact_is_equal(target_layers_weights[tar],0) )
00601                     {
00602                         if( use_target_layers_masks )
00603                         {
00604                             clamp_units(target.subVec(
00605                                             sum_target_elements,
00606                                             target_layers_n_of_target_elements[tar]),
00607                                         target_layers[tar],
00608                                         target_symbol_sizes[tar],
00609                                         input.subVec(
00610                                             inputsize_without_masks 
00611                                             + sum_target_elements, 
00612                                             target_layers_n_of_target_elements[tar]),
00613                                         masks_list[tar][ith_sample_in_sequence]
00614                                 );
00615                             
00616                         }
00617                         else
00618                         {
00619                             clamp_units(target.subVec(
00620                                             sum_target_elements,
00621                                             target_layers_n_of_target_elements[tar]),
00622                                         target_layers[tar],
00623                                         target_symbol_sizes[tar]);
00624                         }
00625                         targets_list[tar][ith_sample_in_sequence] << 
00626                             target_layers[tar]->expectation;
00627                     }
00628                     sum_target_elements += target_layers_n_of_target_elements[tar];
00629                 }
00630                 
00631                 input_connections->fprop( input_list[ith_sample_in_sequence], 
00632                                           hidden_act_no_bias_list[ith_sample_in_sequence]);
00633                 
00634                 if( ith_sample_in_sequence > 0 && dynamic_connections )
00635                 {
00636                     dynamic_connections->fprop( 
00637                         hidden_list[ith_sample_in_sequence-1],
00638                         dynamic_act_no_bias_contribution );
00639 
00640                     hidden_act_no_bias_list[ith_sample_in_sequence] += 
00641                         dynamic_act_no_bias_contribution;
00642                 }
00643                  
00644                 hidden_layer->fprop( hidden_act_no_bias_list[ith_sample_in_sequence], 
00645                                      hidden_list[ith_sample_in_sequence] );
00646                  
00647                 if( hidden_layer2 )
00648                 {
00649                     hidden_connections->fprop( 
00650                         hidden_list[ith_sample_in_sequence],
00651                         hidden2_act_no_bias_list[ith_sample_in_sequence]);
00652 
00653                     hidden_layer2->fprop( 
00654                         hidden2_act_no_bias_list[ith_sample_in_sequence],
00655                         hidden2_list[ith_sample_in_sequence] 
00656                         );
00657 
00658                     for( int tar=0; tar < target_layers.length(); tar++ )
00659                     {
00660                         if( !fast_exact_is_equal(target_layers_weights[tar],0) )
00661                         {
00662                             target_connections[tar]->fprop(
00663                                 hidden2_list[ith_sample_in_sequence],
00664                                 target_prediction_act_no_bias_list[tar][
00665                                     ith_sample_in_sequence]
00666                                 );
00667                             target_layers[tar]->fprop(
00668                                 target_prediction_act_no_bias_list[tar][
00669                                     ith_sample_in_sequence],
00670                                 target_prediction_list[tar][
00671                                     ith_sample_in_sequence] );
00672                             if( use_target_layers_masks )
00673                                 target_prediction_list[tar][ ith_sample_in_sequence] *= 
00674                                     masks_list[tar][ith_sample_in_sequence];
00675                         }
00676                     }
00677                 }
00678                 else
00679                 {
00680                     for( int tar=0; tar < target_layers.length(); tar++ )
00681                     {
00682                         if( !fast_exact_is_equal(target_layers_weights[tar],0) )
00683                         {
00684                             target_connections[tar]->fprop(
00685                                 hidden_list[ith_sample_in_sequence],
00686                                 target_prediction_act_no_bias_list[tar][
00687                                     ith_sample_in_sequence]
00688                                 );
00689                             target_layers[tar]->fprop(
00690                                 target_prediction_act_no_bias_list[tar][
00691                                     ith_sample_in_sequence],
00692                                 target_prediction_list[tar][
00693                                     ith_sample_in_sequence] );
00694                             if( use_target_layers_masks )
00695                                 target_prediction_list[tar][ ith_sample_in_sequence] *= 
00696                                     masks_list[tar][ith_sample_in_sequence];
00697                         }
00698                     }
00699                 }
00700 
00701                 sum_target_elements = 0;
00702                 for( int tar=0; tar < target_layers.length(); tar++ )
00703                 {
00704                     if( !fast_exact_is_equal(target_layers_weights[tar],0) )
00705                     {
00706                         target_layers[tar]->activation << 
00707                             target_prediction_act_no_bias_list[tar][
00708                                 ith_sample_in_sequence];
00709                         target_layers[tar]->activation += target_layers[tar]->bias;
00710                         target_layers[tar]->setExpectation(
00711                             target_prediction_list[tar][
00712                                 ith_sample_in_sequence]);
00713                         nll_list(ith_sample_in_sequence,tar) = 
00714                             target_layers[tar]->fpropNLL( 
00715                                 targets_list[tar][ith_sample_in_sequence] ); 
00716                         train_costs[tar] += nll_list(ith_sample_in_sequence,tar);
00717                         
00718                         // Normalize by the number of things to predict
00719                         if( use_target_layers_masks )
00720                         {
00721                             train_n_items[tar] += sum(
00722                                 input.subVec( inputsize_without_masks 
00723                                               + sum_target_elements, 
00724                                               target_layers_n_of_target_elements[tar]) );
00725                         }
00726                         else
00727                             train_n_items[tar]++;
00728                     }
00729                     if( use_target_layers_masks )
00730                         sum_target_elements += 
00731                             target_layers_n_of_target_elements[tar];
00732                     
00733                 }
00734                 ith_sample_in_sequence++;
00735             }
00736             if( pb )
00737                 pb->update( stage + 1 - init_stage);
00738             
00739             for(int i=0; i<train_costs.length(); i++)
00740             {
00741                 if( !fast_exact_is_equal(target_layers_weights[i],0) )
00742                     train_costs[i] /= train_n_items[i];
00743                 else
00744                     train_costs[i] = MISSING_VALUE;
00745             }
00746 
00747             if(verbosity>0)
00748                 cout << "mean costs at stage " << stage << 
00749                     " = " << train_costs << endl;
00750             stage++;
00751             train_stats->update(train_costs);
00752         }    
00753         if( pb )
00754         {
00755             delete pb;
00756             pb = 0;
00757         }
00758 
00759     }
00760 
00761 
00762     train_stats->finalize();
00763 }
00764 
00765 
00766 
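// Copies layer_vector into layer->expectation: fields whose symbol size is
// -1 are treated as real values and copied directly, while symbolic fields
// are expanded into a one-hot sub-vector of the corresponding size.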
00767 void DynamicallyLinkedRBMsModel::clamp_units(const Vec layer_vector,
00768                                              PP<RBMLayer> layer,
00769                                              TVec<int> symbol_sizes) const
00770 {
00771     int it = 0;
00772     int ss = -1;
00773     for(int i=0; i<layer_vector.length(); i++)
00774     {
00775         ss = symbol_sizes[i];
00776         // If input is a real ...
00777         if(ss < 0) 
00778         {
00779             layer->expectation[it++] = layer_vector[i];
00780         }
00781         else // ... or a symbol
00782         {
00783             // Convert to one-hot vector
00784             layer->expectation.subVec(it,ss).clear();
00785             layer->expectation[it+(int)layer_vector[i]] = 1;
00786             it += ss;
00787         }
00788     }
00789     layer->setExpectation( layer->expectation );
00790 }
00791 
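// Same as above, but also expands original_mask (one value per field) into
// formated_mask, which holds one value per unit of the layer.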
00792 void DynamicallyLinkedRBMsModel::clamp_units(const Vec layer_vector,
00793                                              PP<RBMLayer> layer,
00794                                              TVec<int> symbol_sizes,
00795                                              const Vec original_mask,
00796                                              Vec& formated_mask) const
00797 {
00798     int it = 0;
00799     int ss = -1;
00800     PLASSERT( original_mask.length() == layer_vector.length() );
00801     formated_mask.resize(layer->size);
00802     for(int i=0; i<layer_vector.length(); i++)
00803     {
00804         ss = symbol_sizes[i];
00805         // If input is a real ...
00806         if(ss < 0) 
00807         {
00808             formated_mask[it] = original_mask[i];
00809             layer->expectation[it++] = layer_vector[i];
00810         }
00811         else // ... or a symbol
00812         {
00813             // Convert to one-hot vector
00814             layer->expectation.subVec(it,ss).clear();
00815             formated_mask.subVec(it,ss).fill(original_mask[i]);
00816             layer->expectation[it+(int)layer_vector[i]] = 1;
00817             it += ss;
00818         }
00819     }
00820     layer->setExpectation( layer->expectation );
00821 }
00822 
00823 void DynamicallyLinkedRBMsModel::setLearningRate( real the_learning_rate )
00824 {
00825     input_layer->setLearningRate( the_learning_rate );
00826     hidden_layer->setLearningRate( the_learning_rate );
00827     input_connections->setLearningRate( the_learning_rate );
00828     if( dynamic_connections )
00829         dynamic_connections->setLearningRate( the_learning_rate ); //HUGO: multiply by dynamic_connections_learning_weight;
00830     if( hidden_layer2 )
00831     {
00832         hidden_layer2->setLearningRate( the_learning_rate );
00833         hidden_connections->setLearningRate( the_learning_rate );
00834     }
00835 
00836     for( int i=0; i<target_layers.length(); i++ )
00837     {
00838         target_layers[i]->setLearningRate( the_learning_rate );
00839         target_connections[i]->setLearningRate( the_learning_rate );
00840     }
00841 }
00842 
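// Backpropagation through time over the sequence stored by train(): starting
// from the last time step, the NLL gradient of every target layer with a
// non-zero weight is propagated into the hidden representation, and the
// gradient flowing through dynamic_connections is carried backwards in
// hidden_temporal_gradient while all layers and connections are updated.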
00843 void DynamicallyLinkedRBMsModel::recurrent_update()
00844 {
00845         hidden_temporal_gradient.resize(hidden_layer->size);
00846         hidden_temporal_gradient.clear();
00847         for(int i=hidden_list.length()-1; i>=0; i--){   
00848 
00849             if( hidden_layer2 )
00850                 hidden_gradient.resize(hidden_layer2->size);
00851             else
00852                 hidden_gradient.resize(hidden_layer->size);
00853             hidden_gradient.clear();
00854             if(use_target_layers_masks)
00855             {
00856                 for( int tar=0; tar<target_layers.length(); tar++)
00857                 {
00858                     if( !fast_exact_is_equal(target_layers_weights[tar],0) )
00859                     {
00860                         target_layers[tar]->activation << target_prediction_act_no_bias_list[tar][i];
00861                         target_layers[tar]->activation += target_layers[tar]->bias;
00862                         target_layers[tar]->setExpectation(target_prediction_list[tar][i]);
00863                         target_layers[tar]->bpropNLL(targets_list[tar][i],nll_list(i,tar),bias_gradient);
00864                         bias_gradient *= target_layers_weights[tar];
00865                         bias_gradient *= masks_list[tar][i];
00866                         target_layers[tar]->update(bias_gradient);
00867                         if( hidden_layer2 )
00868                             target_connections[tar]->bpropUpdate(hidden2_list[i],target_prediction_act_no_bias_list[tar][i],
00869                                                                  hidden_gradient, bias_gradient,true);
00870                         else
00871                             target_connections[tar]->bpropUpdate(hidden_list[i],target_prediction_act_no_bias_list[tar][i],
00872                                                                  hidden_gradient, bias_gradient,true);
00873                     }
00874                 }
00875             }
00876             else
00877             {
00878                 for( int tar=0; tar<target_layers.length(); tar++)
00879                 {
00880                     if( !fast_exact_is_equal(target_layers_weights[tar],0) )
00881                     {
00882                         target_layers[tar]->activation << target_prediction_act_no_bias_list[tar][i];
00883                         target_layers[tar]->activation += target_layers[tar]->bias;
00884                         target_layers[tar]->setExpectation(target_prediction_list[tar][i]);
00885                         target_layers[tar]->bpropNLL(targets_list[tar][i],nll_list(i,tar),bias_gradient);
00886                         bias_gradient *= target_layers_weights[tar];
00887                         target_layers[tar]->update(bias_gradient);
00888                         if( hidden_layer2 )
00889                             target_connections[tar]->bpropUpdate(hidden2_list[i],target_prediction_act_no_bias_list[tar][i],
00890                                                                  hidden_gradient, bias_gradient,true); 
00891                         else
00892                             target_connections[tar]->bpropUpdate(hidden_list[i],target_prediction_act_no_bias_list[tar][i],
00893                                                                  hidden_gradient, bias_gradient,true); 
00894                         
00895                     }
00896                 }
00897             }
00898 
00899             if (hidden_layer2)
00900             {
00901                 hidden_layer2->bpropUpdate(
00902                     hidden2_act_no_bias_list[i], hidden2_list[i],
00903                     bias_gradient, hidden_gradient);
00904                 
00905                 hidden_connections->bpropUpdate(
00906                     hidden_list[i],
00907                     hidden2_act_no_bias_list[i], 
00908                     hidden_gradient, bias_gradient);
00909             }
00910             
00911             if(i!=0 && dynamic_connections )
00912             {   
00913                 hidden_gradient += hidden_temporal_gradient;
00914                 
00915                 hidden_layer->bpropUpdate(
00916                     hidden_act_no_bias_list[i], hidden_list[i],
00917                     hidden_temporal_gradient, hidden_gradient);
00918                 
00919                 dynamic_connections->bpropUpdate(
00920                     hidden_list[i-1],
00921                     hidden_act_no_bias_list[i], // Here, it should be cond_bias, but doesn't matter
00922                     hidden_gradient, hidden_temporal_gradient);
00923                 
00924                 hidden_temporal_gradient << hidden_gradient;
00925                 
00926                 input_connections->bpropUpdate(
00927                     input_list[i],
00928                     hidden_act_no_bias_list[i], 
00929                     visi_bias_gradient, hidden_temporal_gradient);// Here, it should be activations - cond_bias, but doesn't matter
00930                 
00931             }
00932             else
00933             {
00934                 hidden_layer->bpropUpdate(
00935                     hidden_act_no_bias_list[i], hidden_list[i],
00936                     hidden_temporal_gradient, hidden_gradient); // Not really temporal gradient, but this is the final iteration...
00937                 input_connections->bpropUpdate(
00938                     input_list[i],
00939                     hidden_act_no_bias_list[i], 
00940                     visi_bias_gradient, hidden_temporal_gradient);// Here, it should be activations - cond_bias, but doesn't matter
00941 
00942             }
00943         }
00944     
00945 }
00946 
00947 void DynamicallyLinkedRBMsModel::computeOutput(const Vec& input, Vec& output) const
00948 {
00949     PLERROR("DynamicallyLinkedRBMsModel::computeOutput(): this is a dynamic, "
00950             "generative model that can only compute negative log-likelihood "
00951             "costs for a whole VMat");
00952 }
00953 
00954 void DynamicallyLinkedRBMsModel::computeCostsFromOutputs(const Vec& input, const Vec& output,
00955                                            const Vec& target, Vec& costs) const
00956 {
00957     PLERROR("DynamicallyLinkedRBMsModel::computeCostsFromOutputs(): this is a "
00958             "dynamic, generative model that can only compute negative "
00959             "log-likelihood costs for a whole VMat");
00960 }
00961 
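// Runs the same forward pass as train(), but without any parameter update,
// and reports the mean negative log-likelihood of each target layer over
// the test sequences.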
00962 void DynamicallyLinkedRBMsModel::test(VMat testset, PP<VecStatsCollector> test_stats,
00963                   VMat testoutputs, VMat testcosts)const
00964 { 
00965 
00966     int len = testset.length();
00967     Vec input;
00968     Vec target;
00969     real weight = 0;
00970 
00971     Vec output(outputsize());
00972     output.clear();
00973     Vec costs(nTestCosts());
00974     costs.clear();
00975     Vec n_items(nTestCosts());
00976     n_items.clear();
00977 
00978     PP<ProgressBar> pb;
00979     if (report_progress) 
00980         pb = new ProgressBar("Testing learner", len);
00981 
00982     if (len == 0) {
00983         // Empty test set: we give -1 cost arbitrarily.
00984         costs.fill(-1);
00985         test_stats->update(costs);
00986     }
00987     
00988     int ith_sample_in_sequence = 0;
00989     int inputsize_without_masks = inputsize() 
00990         - ( use_target_layers_masks ? targetsize() : 0 );
00991     int sum_target_elements = 0;
00992     for (int i = 0; i < len; i++)
00993     {
00994         testset.getExample(i, input, target, weight);
00995 
00996         if( fast_exact_is_equal(input[0],end_of_sequence_symbol) )
00997         {
00998             ith_sample_in_sequence = 0;
00999             hidden_list.resize(0);
01000             hidden_act_no_bias_list.resize(0);
01001             hidden2_list.resize(0);
01002             hidden2_act_no_bias_list.resize(0);
01003             target_prediction_list.resize(0);
01004             target_prediction_act_no_bias_list.resize(0);
01005             input_list.resize(0);
01006             targets_list.resize(0);
01007             nll_list.resize(0,0);
01008             masks_list.resize(0);
01009 
01010             if (testoutputs)
01011             {
01012                 output.fill(end_of_sequence_symbol);
01013                 testoutputs->putOrAppendRow(i, output);
01014             }
01015 
01016             continue;
01017         }
01018 
01019         // Resize internal variables
01020         hidden_list.resize(ith_sample_in_sequence+1);
01021         hidden_act_no_bias_list.resize(ith_sample_in_sequence+1);
01022         if( hidden_layer2 )
01023         {
01024             hidden2_list.resize(ith_sample_in_sequence+1);
01025             hidden2_act_no_bias_list.resize(ith_sample_in_sequence+1);
01026         }
01027                  
01028         input_list.resize(ith_sample_in_sequence+1);
01029         input_list[ith_sample_in_sequence].resize(input_layer->size);
01030 
01031         targets_list.resize( target_layers.length() );
01032         target_prediction_list.resize( target_layers.length() );
01033         target_prediction_act_no_bias_list.resize( target_layers.length() );
01034         for( int tar=0; tar < target_layers.length(); tar++ )
01035         {
01036             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01037             {
01038                 targets_list[tar].resize( ith_sample_in_sequence+1);
01039                 targets_list[tar][ith_sample_in_sequence].resize( 
01040                     target_layers[tar]->size);
01041                 target_prediction_list[tar].resize(
01042                     ith_sample_in_sequence+1);
01043                 target_prediction_act_no_bias_list[tar].resize(
01044                     ith_sample_in_sequence+1);
01045             }
01046         }
01047         nll_list.resize(ith_sample_in_sequence+1,target_layers.length());
01048         if( use_target_layers_masks )
01049         {
01050             masks_list.resize( target_layers.length() );
01051             for( int tar=0; tar < target_layers.length(); tar++ )
01052                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01053                     masks_list[tar].resize( ith_sample_in_sequence+1 );
01054         }
01055 
01056         // Forward propagation
01057 
01058         // Fetch right representation for input
01059         clamp_units(input.subVec(0,inputsize_without_masks),
01060                     input_layer,
01061                     input_symbol_sizes);                
01062         input_list[ith_sample_in_sequence] << input_layer->expectation;
01063 
01064         // Fetch right representation for target
01065         sum_target_elements = 0;
01066         for( int tar=0; tar < target_layers.length(); tar++ )
01067         {
01068             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01069             {
01070                 if( use_target_layers_masks )
01071                 {
01072                     clamp_units(target.subVec(
01073                                     sum_target_elements,
01074                                     target_layers_n_of_target_elements[tar]),
01075                                 target_layers[tar],
01076                                 target_symbol_sizes[tar],
01077                                 input.subVec(
01078                                     inputsize_without_masks 
01079                                     + sum_target_elements, 
01080                                     target_layers_n_of_target_elements[tar]),
01081                                 masks_list[tar][ith_sample_in_sequence]
01082                         );
01083                     
01084                 }
01085                 else
01086                 {
01087                     clamp_units(target.subVec(
01088                                     sum_target_elements,
01089                                     target_layers_n_of_target_elements[tar]),
01090                                 target_layers[tar],
01091                                 target_symbol_sizes[tar]);
01092                 }
01093                 targets_list[tar][ith_sample_in_sequence] << 
01094                     target_layers[tar]->expectation;
01095             }
01096             sum_target_elements += target_layers_n_of_target_elements[tar];
01097         }
01098                 
01099         input_connections->fprop( input_list[ith_sample_in_sequence], 
01100                                   hidden_act_no_bias_list[ith_sample_in_sequence]);
01101                 
01102         if( ith_sample_in_sequence > 0 && dynamic_connections )
01103         {
01104             dynamic_connections->fprop( 
01105                 hidden_list[ith_sample_in_sequence-1],
01106                 dynamic_act_no_bias_contribution );
01107 
01108             hidden_act_no_bias_list[ith_sample_in_sequence] += 
01109                 dynamic_act_no_bias_contribution;
01110         }
01111                  
01112         hidden_layer->fprop( hidden_act_no_bias_list[ith_sample_in_sequence], 
01113                              hidden_list[ith_sample_in_sequence] );
01114                  
01115         if( hidden_layer2 )
01116         {
01117             hidden_connections->fprop( 
01118                 hidden_list[ith_sample_in_sequence],
01119                 hidden2_act_no_bias_list[ith_sample_in_sequence]);
01120 
01121             hidden_layer2->fprop( 
01122                 hidden2_act_no_bias_list[ith_sample_in_sequence],
01123                 hidden2_list[ith_sample_in_sequence] 
01124                 );
01125 
01126             for( int tar=0; tar < target_layers.length(); tar++ )
01127             {
01128                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01129                 {
01130                     target_connections[tar]->fprop(
01131                         hidden2_list[ith_sample_in_sequence],
01132                         target_prediction_act_no_bias_list[tar][
01133                             ith_sample_in_sequence]
01134                         );
01135                     target_layers[tar]->fprop(
01136                         target_prediction_act_no_bias_list[tar][
01137                             ith_sample_in_sequence],
01138                         target_prediction_list[tar][
01139                             ith_sample_in_sequence] );
01140                     if( use_target_layers_masks )
01141                         target_prediction_list[tar][ ith_sample_in_sequence] *= 
01142                             masks_list[tar][ith_sample_in_sequence];
01143                 }
01144             }
01145         }
01146         else
01147         {
01148             for( int tar=0; tar < target_layers.length(); tar++ )
01149             {
01150                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01151                 {
01152                     target_connections[tar]->fprop(
01153                         hidden_list[ith_sample_in_sequence],
01154                         target_prediction_act_no_bias_list[tar][
01155                             ith_sample_in_sequence]
01156                         );
01157                     target_layers[tar]->fprop(
01158                         target_prediction_act_no_bias_list[tar][
01159                             ith_sample_in_sequence],
01160                         target_prediction_list[tar][
01161                             ith_sample_in_sequence] );
01162                     if( use_target_layers_masks )
01163                         target_prediction_list[tar][ ith_sample_in_sequence] *= 
01164                             masks_list[tar][ith_sample_in_sequence];
01165                 }
01166             }
01167         }
01168 
01169         if (testoutputs)
01170         {
01171             int sum_target_layers_size = 0;
01172             for( int tar=0; tar < target_layers.length(); tar++ )
01173             {
01174                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01175                 {
01176                     output.subVec(sum_target_layers_size,target_layers[tar]->size)
01177                         << target_prediction_list[tar][ ith_sample_in_sequence ];
01178                 }
01179                 sum_target_layers_size += target_layers[tar]->size;
01180             }
01181             testoutputs->putOrAppendRow(i, output);
01182         }
01183 
01184         sum_target_elements = 0;
01185         for( int tar=0; tar < target_layers.length(); tar++ )
01186         {
01187             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01188             {
01189                 target_layers[tar]->activation << 
01190                     target_prediction_act_no_bias_list[tar][
01191                         ith_sample_in_sequence];
01192                 target_layers[tar]->activation += target_layers[tar]->bias;
01193                 target_layers[tar]->setExpectation(
01194                     target_prediction_list[tar][
01195                         ith_sample_in_sequence]);
01196                 nll_list(ith_sample_in_sequence,tar) = 
01197                     target_layers[tar]->fpropNLL( 
01198                         targets_list[tar][ith_sample_in_sequence] ); 
01199                 costs[tar] += nll_list(ith_sample_in_sequence,tar);
01200                 
01201                 // Normalize by the number of things to predict
01202                 if( use_target_layers_masks )
01203                 {
01204                     n_items[tar] += sum(
01205                         input.subVec( inputsize_without_masks 
01206                                       + sum_target_elements, 
01207                                       target_layers_n_of_target_elements[tar]) );
01208                 }
01209                 else
01210                     n_items[tar]++;
01211             }
01212             if( use_target_layers_masks )
01213                 sum_target_elements += 
01214                     target_layers_n_of_target_elements[tar];
01215         }
01216         ith_sample_in_sequence++;
01217 
01218         if (report_progress)
01219             pb->update(i);
01220 
01221     }
01222 
01223     for(int i=0; i<costs.length(); i++)
01224     {
01225         if( !fast_exact_is_equal(target_layers_weights[i],0) )
01226             costs[i] /= n_items[i];
01227         else
01228             costs[i] = MISSING_VALUE;
01229     }
01230     if (testcosts)
01231         testcosts->putOrAppendRow(0, costs);
01232     
01233     if (test_stats)
01234         test_stats->update(costs, weight);
01235     
01236     ith_sample_in_sequence = 0;
01237     hidden_list.resize(0);
01238     hidden_act_no_bias_list.resize(0);
01239     hidden2_list.resize(0);
01240     hidden2_act_no_bias_list.resize(0);
01241     target_prediction_list.resize(0);
01242     target_prediction_act_no_bias_list.resize(0);
01243     input_list.resize(0);
01244     targets_list.resize(0);
01245     nll_list.resize(0,0);
01246     masks_list.resize(0);   
01247 }
01248 
01249 
01250 TVec<string> DynamicallyLinkedRBMsModel::getTestCostNames() const
01251 {
01252     TVec<string> cost_names(0);
01253     for( int i=0; i<target_layers.length(); i++ )
01254         cost_names.append("target" + tostring(i) + ".NLL");
01255     return cost_names;
01256 }
01257 
01258 TVec<string> DynamicallyLinkedRBMsModel::getTrainCostNames() const
01259 {
01260     return getTestCostNames();
01261 }
01262 
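// Generates a sequence from a hard-coded .amat file: the first n time steps
// are clamped to the data, and from then on the model's own predictions for
// the t previous time steps are written back into the conditioning part of
// the input, so the sequence is continued from generated data. The result is
// dumped to a hard-coded text file at the end of the method. Note that the
// data paths and sizes below are specific to one experiment.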
01263 void DynamicallyLinkedRBMsModel::generate(int t, int n)
01264 {
01265     //PPath* the_filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/scoreGen.amat";
01266     data = new AutoVMatrix();
01267     data->filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/listData/target_tm12_input_t_tm12_tp12/scoreGen_tar_tm12__in_tm12_tp12.amat";
01268     //data->filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/create_data/scoreGenSuitePerf.amat";
01269 
01270     data->defineSizes(208,16,0);
01271     //data->inputsize = 21;
01272     //data->targetsize = 0;
01273     //data->weightsize = 0;
01274     data->build();
01275 
01276     
01277     
01278    
01279    
01280 
01281     int len = data->length();
01282     int tarSize = outputsize();
01283     int partTarSize;
01284     Vec input;
01285     Vec target;
01286     real weight;
01287 
01288     Vec output(outputsize());
01289     output.clear();
01290     /*Vec costs(nTestCosts());
01291     costs.clear();
01292     Vec n_items(nTestCosts());
01293     n_items.clear();*/
01294 
01295     int r,r2;
01296     
01297     int ith_sample_in_sequence = 0;
01298     int inputsize_without_masks = inputsize() 
01299         - ( use_target_layers_masks ? targetsize() : 0 );
01300     int sum_target_elements = 0;
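    // Past the first n time steps, overwrite the portion of the input that
    // holds the previous targets with the model's own predictions for the t
    // preceding steps, so that generation no longer depends on the ground
    // truth.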
01301     for (int i = 0; i < len; i++)
01302     {
01303         data->getExample(i, input, target, weight);
01304         if(i>n)
01305         {
01306             for (int k = 1; k <= t; k++)
01307             {
01308                 if(k<=i){
01309                     partTarSize = outputsize();
01310                     for( int tar=0; tar < target_layers.length(); tar++ )
01311                     {
01312                         input.subVec( inputsize_without_masks
01313                                       - (tarSize*(t-k)) - partTarSize - 1,
01314                                       target_layers[tar]->size )
01315                             << target_prediction_list[tar][ith_sample_in_sequence-k];
01316                         partTarSize -= target_layers[tar]->size;
01317                     }
01318                 }
01319             }       
01320         }
01321     
01322 /*
01323         for (int k = 1; k <= t; k++)
01324         {
01325             partTarSize = outputsize();
01326             for( int tar=0; tar < target_layers.length(); tar++ )
01327             {
01328                 if(i>=t){
01329                     input.subVec(inputsize_without_masks-(tarSize*(t-k))-partTarSize-1,target_layers[tar]->size) << target_prediction_list[tar][ith_sample_in_sequence-k];
01330                     partTarSize -= target_layers[tar]->size;
01331                 }
01332             }
01333         }
01334 */
01335         if( fast_exact_is_equal(input[0],end_of_sequence_symbol) )
01336         {
01337             /*  ith_sample_in_sequence = 0;
01338             hidden_list.resize(0);
01339             hidden_act_no_bias_list.resize(0);
01340             hidden2_list.resize(0);
01341             hidden2_act_no_bias_list.resize(0);
01342             target_prediction_list.resize(0);
01343             target_prediction_act_no_bias_list.resize(0);
01344             input_list.resize(0);
01345             targets_list.resize(0);
01346             nll_list.resize(0,0);
01347             masks_list.resize(0);*/
01348 
01349             
01350 
01351             continue;
01352         }
01353 
01354         // Resize internal variables
01355         hidden_list.resize(ith_sample_in_sequence+1);
01356         hidden_act_no_bias_list.resize(ith_sample_in_sequence+1);
01357         if( hidden_layer2 )
01358         {
01359             hidden2_list.resize(ith_sample_in_sequence+1);
01360             hidden2_act_no_bias_list.resize(ith_sample_in_sequence+1);
01361         }
01362                  
01363         input_list.resize(ith_sample_in_sequence+1);
01364         input_list[ith_sample_in_sequence].resize(input_layer->size);
01365 
01366         targets_list.resize( target_layers.length() );
01367         target_prediction_list.resize( target_layers.length() );
01368         target_prediction_act_no_bias_list.resize( target_layers.length() );
01369         for( int tar=0; tar < target_layers.length(); tar++ )
01370         {
01371             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01372             {
01373                 targets_list[tar].resize( ith_sample_in_sequence+1);
01374                 targets_list[tar][ith_sample_in_sequence].resize( 
01375                     target_layers[tar]->size);
01376                 target_prediction_list[tar].resize(
01377                     ith_sample_in_sequence+1);
01378                 target_prediction_act_no_bias_list[tar].resize(
01379                     ith_sample_in_sequence+1);
01380             }
01381         }
01382         nll_list.resize(ith_sample_in_sequence+1,target_layers.length());
01383         if( use_target_layers_masks )
01384         {
01385             masks_list.resize( target_layers.length() );
01386             for( int tar=0; tar < target_layers.length(); tar++ )
01387                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01388                     masks_list[tar].resize( ith_sample_in_sequence+1 );
01389         }
01390 
01391         // Forward propagation
01392 
01393         // Fetch right representation for input
01394         clamp_units(input.subVec(0,inputsize_without_masks),
01395                     input_layer,
01396                     input_symbol_sizes);                
01397         input_list[ith_sample_in_sequence] << input_layer->expectation;
01398 
01399         // Fetch right representation for target
01400         sum_target_elements = 0;
01401         for( int tar=0; tar < target_layers.length(); tar++ )
01402         {
01403             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01404             {
01405                 if( use_target_layers_masks )
01406                 {
01407                     clamp_units(target.subVec(
01408                                     sum_target_elements,
01409                                     target_layers_n_of_target_elements[tar]),
01410                                 target_layers[tar],
01411                                 target_symbol_sizes[tar],
01412                                 input.subVec(
01413                                     inputsize_without_masks 
01414                                     + sum_target_elements, 
01415                                     target_layers_n_of_target_elements[tar]),
01416                                 masks_list[tar][ith_sample_in_sequence]
01417                         );
01418                     
01419                 }
01420                 else
01421                 {
01422                     clamp_units(target.subVec(
01423                                     sum_target_elements,
01424                                     target_layers_n_of_target_elements[tar]),
01425                                 target_layers[tar],
01426                                 target_symbol_sizes[tar]);
01427                 }
01428                 targets_list[tar][ith_sample_in_sequence] << 
01429                     target_layers[tar]->expectation;
01430             }
01431             sum_target_elements += target_layers_n_of_target_elements[tar];
01432         }
01433                 
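        // Forward pass through the recurrent part. Schematically (a sketch
        // only; the actual activation functions depend on the layer types
        // chosen in the options):
        //   a_t  = W_in x_t                            (input_connections)
        //   a_t += W_dyn h_{t-1}          for t > 0    (dynamic_connections)
        //   h_t  = act(a_t + b_h)                      (hidden_layer->fprop)
        //   o_t^k = out(W_k h_t + b_k)                 (target_connections[k])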
01434         input_connections->fprop( input_list[ith_sample_in_sequence], 
01435                                   hidden_act_no_bias_list[ith_sample_in_sequence]);
01436                 
01437         if( ith_sample_in_sequence > 0 && dynamic_connections )
01438         {
01439             dynamic_connections->fprop( 
01440                 hidden_list[ith_sample_in_sequence-1],
01441                 dynamic_act_no_bias_contribution );
01442 
01443             hidden_act_no_bias_list[ith_sample_in_sequence] += 
01444                 dynamic_act_no_bias_contribution;
01445         }
01446                  
01447         hidden_layer->fprop( hidden_act_no_bias_list[ith_sample_in_sequence], 
01448                              hidden_list[ith_sample_in_sequence] );
01449                  
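        // With a second hidden layer, the target read-out is computed from
        // h2_t = act(W_hid h_t + b_h2); otherwise it is taken directly
        // from h_t.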
01450         if( hidden_layer2 )
01451         {
01452             hidden_connections->fprop( 
01453                 hidden_list[ith_sample_in_sequence],
01454                 hidden2_act_no_bias_list[ith_sample_in_sequence]);
01455 
01456             hidden_layer2->fprop( 
01457                 hidden2_act_no_bias_list[ith_sample_in_sequence],
01458                 hidden2_list[ith_sample_in_sequence] 
01459                 );
01460 
01461             for( int tar=0; tar < target_layers.length(); tar++ )
01462             {
01463                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01464                 {
01465                     target_connections[tar]->fprop(
01466                         hidden2_list[ith_sample_in_sequence],
01467                         target_prediction_act_no_bias_list[tar][
01468                             ith_sample_in_sequence]
01469                         );
01470                     target_layers[tar]->fprop(
01471                         target_prediction_act_no_bias_list[tar][
01472                             ith_sample_in_sequence],
01473                         target_prediction_list[tar][
01474                             ith_sample_in_sequence] );
01475                     if( use_target_layers_masks )
01476                         target_prediction_list[tar][ ith_sample_in_sequence] *= 
01477                             masks_list[tar][ith_sample_in_sequence];
01478                 }
01479             }
01480         }
01481         else
01482         {
01483             for( int tar=0; tar < target_layers.length(); tar++ )
01484             {
01485                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01486                 {
01487                     target_connections[tar]->fprop(
01488                         hidden_list[ith_sample_in_sequence],
01489                         target_prediction_act_no_bias_list[tar][
01490                             ith_sample_in_sequence]
01491                         );
01492                     target_layers[tar]->fprop(
01493                         target_prediction_act_no_bias_list[tar][
01494                             ith_sample_in_sequence],
01495                         target_prediction_list[tar][
01496                             ith_sample_in_sequence] );
01497                     if( use_target_layers_masks )
01498                         target_prediction_list[tar][ ith_sample_in_sequence] *= 
01499                             masks_list[tar][ith_sample_in_sequence];
01500                 }
01501             }
01502         }
01503 
01504         
01505 
01506         sum_target_elements = 0;
01507         for( int tar=0; tar < target_layers.length(); tar++ )
01508         {
01509             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01510             {
01511                 target_layers[tar]->activation << 
01512                     target_prediction_act_no_bias_list[tar][
01513                         ith_sample_in_sequence];
01514                 target_layers[tar]->activation += target_layers[tar]->bias;
01515                 target_layers[tar]->setExpectation(
01516                     target_prediction_list[tar][
01517                         ith_sample_in_sequence]);
01518                 nll_list(ith_sample_in_sequence,tar) = 
01519                     target_layers[tar]->fpropNLL( 
01520                         targets_list[tar][ith_sample_in_sequence] ); 
01521                 /*costs[tar] += nll_list(ith_sample_in_sequence,tar);
01522                 
01523                 // Normalize by the number of things to predict
01524                 if( use_target_layers_masks )
01525                 {
01526                     n_items[tar] += sum(
01527                         input.subVec( inputsize_without_masks 
01528                                       + sum_target_elements, 
01529                                       target_layers_n_of_target_elements[tar]) );
01530                 }
01531                 else
01532                 n_items[tar]++;*/
01533             }
01534             if( use_target_layers_masks )
01535                 sum_target_elements += 
01536                     target_layers_n_of_target_elements[tar];
01537         }
01538         ith_sample_in_sequence++;
01539 
01540         
01541 
01542     }
01543 
01544     /*  
01545     ith_sample_in_sequence = 0;
01546     hidden_list.resize(0);
01547     hidden_act_no_bias_list.resize(0);
01548     hidden2_list.resize(0);
01549     hidden2_act_no_bias_list.resize(0);
01550     target_prediction_list.resize(0);
01551     target_prediction_act_no_bias_list.resize(0);
01552     input_list.resize(0);
01553     targets_list.resize(0);
01554     nll_list.resize(0,0);
01555     masks_list.resize(0);   
01556 
01557 
01558     */
01559 
01560 
01561 
01562 
01563 
01564 
01565 
01566 
01567 
01568     
01569     //Vec tempo;
01570     //TVec<real> tempo;
01571     //tempo.resize(visible_layer->size);
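    // Dump the generated sequence to a hard-coded text file: rows up to n
    // contain the clamped ground-truth targets, later rows contain the
    // model's own predictions.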
01572     ofstream myfile;
01573     myfile.open ("/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/test.txt");
01574     
01575     for (int i = 0; i < target_prediction_list[0].length() ; i++ ){
01576        
01577        
01578         for( int tar=0; tar < target_layers.length(); tar++ )
01579         {
01580             for (int j = 0; j < target_prediction_list[tar][i].length() ; j++ ){
01581                 
01582                 if(i>n){
01583                     myfile << target_prediction_list[tar][i][j] << " ";
01584                 }
01585                 else{
01586                     myfile << targets_list[tar][i][j] << " ";
01587                 }
01588                        
01589            
01590             }
01591         }
01592         myfile << "\n";
01593     }
01594      
01595 
01596      myfile.close();
01597 
01598 }
01599 /*
01600 void DynamicallyLinkedRBMsModel::gen()
01601 {
01602     //PPath* the_filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/scoreGen.amat";
01603     data = new AutoVMatrix();
01604     data->filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/scoreGen.amat";
01605     data->defineSizes(21,0,0);
01606     //data->inputsize = 21;
01607     //data->targetsize = 0;
01608     //data->weightsize = 0;
01609     data->build();
01610 
01611     
01612     int len = data->length();
01613     Vec score;
01614     Vec target;
01615     real weight;
01616     Vec bias_tempo;
01617     Vec visi_bias_tempo;
01618    
01619    
01620     
01621     previous_hidden_layer.resize(hidden_layer->size);
01622     connections_idem = connections;
01623 
01624     for (int ith_sample = 0; ith_sample < len ; ith_sample++ ){
01625         
01626         data->getExample(ith_sample, score, target, weight);
01627         //score << data(ith_sample);
01628         input_prediction_list.resize(
01629             ith_sample+1,visible_layer->size);
01630         if(ith_sample > 0)
01631         {
01632             
01633             //input_list(ith_sample_in_sequence) << previous_input;
01634             //h*_{t-1}
01636             dynamic_connections->fprop(previous_hidden_layer, cond_bias);
01637             hidden_layer->setAllBias(cond_bias); 
01638             
01639             
01640             
01641             //up phase
01642             connections->setAsDownInput( input_prediction_list(ith_sample-1) );
01643             hidden_layer->getAllActivations( connections_idem );
01644             hidden_layer->computeExpectation();
01646             
01647             //previous_hidden_layer << hidden_layer->expectation;//h_{t-2} at the next time step
01648             //previous_hidden_layer_act_no_bias << hidden_layer->activation;
01649             
01650             
01651             //h*_{t}
01653             if(dynamic_connections_copy)
01654                 dynamic_connections_copy->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
01655             else
01656                 dynamic_connections->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
01657             //dynamic_connections_copy->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
01658             hidden_layer->expectation_is_not_up_to_date();
01659             hidden_layer->computeExpectation();//h_{t}
01661             
01662             //previous_input << visible_layer->expectation;//v_{t-1}
01663             
01664         }
01665         else
01666         {
01667             
01668             previous_hidden_layer.clear();//h_{t-1}
01669             if(dynamic_connections_copy)
01670                 dynamic_connections_copy->fprop( previous_hidden_layer ,
01671                                                  hidden_layer->activation);//connection between h_{t-1} and h_{t}
01672             else
01673                 dynamic_connections->fprop(previous_hidden_layer,
01674                                            hidden_layer->activation);//connection between h_{t-1} and h_{t}
01675             
01676             hidden_layer->expectation_is_not_up_to_date();
01677             hidden_layer->computeExpectation();//h_{t}
01678             //previous_input.resize(data->inputsize);
01679             //previous_input << data(ith_sample);
01680             
01681         }
01682         
01683         //connections_transpose->setAsDownInput( hidden_layer->expectation );
01684         //visible_layer->getAllActivations( connections_idem_t );
01685         
01686         connections->setAsUpInput( hidden_layer->expectation );
01687         visible_layer->getAllActivations( connections_idem );
01688         
01689         visible_layer->computeExpectation();
01690         //visible_layer->generateSample();
01691         partition(score.subVec(14,taillePart), visible_layer->activation.subVec(14+taillePart,taillePart), visible_layer->activation.subVec(14+(taillePart*2),taillePart));
01692         partition(score.subVec(14,taillePart), visible_layer->expectation.subVec(14+taillePart,taillePart), visible_layer->expectation.subVec(14+(taillePart*2),taillePart));
01693 
01694 
01695         visible_layer->activation.subVec(0,14+taillePart) << score;
01696         visible_layer->expectation.subVec(0,14+taillePart) << score;
01697 
01698         input_prediction_list(ith_sample) << visible_layer->expectation;
01699         
01700     }
01701     
01702     //Vec tempo;
01703     TVec<real> tempo;
01704     tempo.resize(visible_layer->size);
01705     ofstream myfile;
01706     myfile.open ("/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/test.txt");
01707     
01708     for (int i = 0; i < len ; i++ ){
01709         tempo << input_prediction_list(i);
01710         
01711         //cout << tempo[2] << endl;
01712        
01713         for (int j = 0; j < tempo.length() ; j++ ){
01714             
01715             
01716                 
01717                 
01718                myfile << tempo[j] << " ";
01719                
01720 
01721                
01722            
01723         }
01724         myfile << "\n";
01725     }
01726      
01727 
01728      myfile.close();
01729 
01730 }*/
01731 //void DynamicallyLinkedRBMsModel::generate(int nbNotes)
01732 //{
01733 //    
01734 //    previous_hidden_layer.resize(hidden_layer->size);
01735 //    connections_idem = connections;
01736 //
01737 //    for (int ith_sample = 0; ith_sample < nbNotes ; ith_sample++ ){
01738 //        
01739 //        input_prediction_list.resize(
01740 //            ith_sample+1,visible_layer->size);
01741 //        if(ith_sample > 0)
01742 //        {
01743 //            
01744 //            //input_list(ith_sample_in_sequence) << previous_input;
01745 //            //h*_{t-1}
01746 //            //////////////////////////////////
01747 //            dynamic_connections->fprop(previous_hidden_layer, cond_bias);
01748 //            hidden_layer->setAllBias(cond_bias); //**************************
01749 //            
01750 //            
01751 //            
01752 //            //up phase
01753 //            connections->setAsDownInput( input_prediction_list(ith_sample-1) );
01754 //            hidden_layer->getAllActivations( connections_idem );
01755 //            hidden_layer->computeExpectation();
01756 //            //////////////////////////////////
01757 //            
01758 //            //previous_hidden_layer << hidden_layer->expectation;//h_{t-2} at the next time step//******************************
01759 //            //previous_hidden_layer_act_no_bias << hidden_layer->activation;
01760 //            
01761 //            
01762 //            //h*_{t}
01763 //            ////////////
01764 //            if(dynamic_connections_copy)
01765 //                dynamic_connections_copy->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
01766 //            else
01767 //                dynamic_connections->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
01768 //            //dynamic_connections_copy->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
01769 //            hidden_layer->expectation_is_not_up_to_date();
01770 //            hidden_layer->computeExpectation();//h_{t}
01771 //            ///////////
01772 //            
01773 //            //previous_input << visible_layer->expectation;//v_{t-1}
01774 //            
01775 //        }
01776 //        else
01777 //        {
01778 //            
01779 //            previous_hidden_layer.clear();//h_{t-1}
01780 //            if(dynamic_connections_copy)
01781 //                dynamic_connections_copy->fprop( previous_hidden_layer ,
01782 //                                                 hidden_layer->activation);//connection between h_{t-1} and h_{t}
01783 //            
01784 //                dynamic_connections->fprop(previous_hidden_layer,
01785 //                                           hidden_layer->activation);//connection between h_{t-1} and h_{t}
01786 //            
01787 //            hidden_layer->expectation_is_not_up_to_date();
01788 //            hidden_layer->computeExpectation();//h_{t}
01789 //            
01790 //            
01791 //        }
01792 //        
01793 //        //connections_transpose->setAsDownInput( hidden_layer->expectation );
01794 //        //visible_layer->getAllActivations( connections_idem_t );
01795 //        
01796 //        connections->setAsUpInput( hidden_layer->expectation );
01797 //        visible_layer->getAllActivations( connections_idem );
01798 //        
01799 //        visible_layer->computeExpectation();
01800 //        visible_layer->generateSample();
01801 //        
01802 //        input_prediction_list(ith_sample) << visible_layer->sample;
01803 //        
01804 //    }
01805 //    
01806 //    //Vec tempo;
01807 //    TVec<int> tempo;
01808 //    tempo.resize(visible_layer->size);
01809 //    int theNote;
01810 //    //int nbNoteVisiLayer = input_prediction_list(1).length()/13;
01811 //    ofstream myfile;
01812 //    int theLayer;
01813 //    myfile.open ("/home/stan/Documents/recherche_maitrise/DDBN_musicGeneration/data/generate/test.txt");
01814 //    
01815 //    for (int i = 0; i < nbNotes ; i++ ){
01816 //        tempo << input_prediction_list(i);
01817 //        
01818 //        //cout << tempo[2] << endl;
01819 //       
01820 //        for (int j = 0; j < tempo.length() ; j++ ){
01821 //            
01822 //            if (tempo[j] == 1){
01823 //                theLayer = (j/13);
01824 //                
01825 //                theNote = j - (13*theLayer);
01826 //               
01827 //
01828 //                if (theNote<=11){
01829 //                    //print theNote
01830 //                    //cout << theNote+50 << " ";
01831 //                    myfile << theNote << " ";
01832 //                }
01833 //                else{
01834 //                    //print #
01835 //                    //cout << "# ";
01836 //                    myfile << "# ";
01837 //                    
01838 //                }
01839 //     
01840 //            }
01841 //           
01842 //        }
01843 //        myfile << "\n";
01844 //    }
01845 //     myfile << "<oov> <oov> \n";
01846 //
01847 //     myfile.close();
01848 //
01849 //}
01850 
01851 } // end of namespace PLearn
01852 
01853 
01854 /*
01855   Local Variables:
01856   mode:c++
01857   c-basic-offset:4
01858   c-file-style:"stroustrup"
01859   c-file-offsets:((innamespace . 0)(inline-open . 0))
01860   indent-tabs-mode:nil
01861   fill-column:79
01862   End:
01863 */
01864 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :