// -*- C++ -*-

// DenoisingRecurrentNet.cc
//
// Copyright (C) 2006 Stanislas Lauly
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Stanislas Lauly

#define PL_LOG_MODULE_NAME "DenoisingRecurrentNet"
#include <plearn/io/pl_log.h>

#include "DenoisingRecurrentNet.h"
#include "plearn/math/plapack.h"

// - commit MSE
// - add denoising recurrent net. Two possibilities:
//   1) add noise to the input, and reconstruct the targets with possibly
//      different weights
//     * option denoising_target_layers_weights (that is where the input goes)
//     * version of clamp_units that adds the noise
//   2) reconstruct the input directly (without a 2nd hidden layer)
//     * still clamp_units that adds the noise
//     * an option saying which part of the input to reconstruct, and code
//       to block the gradient that must not flow through (not very clean,
//       but oh well...)
//     * an option giving the reconstruction connections
//     * code to train the hidden_connections separately (if present)
// - could compute the gradient of the denoising recurrent net at the same
//   time as that of the "fine-tuning"
// - add dynamic_activations_list and use it in recurrentUpdate


namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    DenoisingRecurrentNet,
    "Model made of RBMs linked through time",
    ""
    );

DenoisingRecurrentNet::DenoisingRecurrentNet() :
    use_target_layers_masks( false ),
    end_of_sequence_symbol( -1000 ),
    encoding("note_octav_duration"),
    input_window_size(1),
    tied_input_reconstruction_weights( true ),
    input_noise_prob( 0.15 ),
    input_reconstruction_lr( 0 ),
    hidden_noise_prob( 0.15 ),
    hidden_reconstruction_lr( 0 ),
    tied_hidden_reconstruction_weights( false ),
    noisy_recurrent_lr( 0.000001 ),
    dynamic_gradient_scale_factor( 1.0 ),
    recurrent_lr( 0.00001 ),
    prediction_cost_weight(1),
    input_reconstruction_cost_weight(0),
    hidden_reconstruction_cost_weight(0),
    current_learning_rate(0),
    nb_stage_reconstruction(0),
    nb_stage_target(0),
    noise(false),
    L1_penalty_factor(0),
    L2_penalty_factor(0)
{
    random_gen = new PRandom();
}

void DenoisingRecurrentNet::declareOptions(OptionList& ol)
{
//    declareOption(ol, "rbm_learning_rate", &DenoisingRecurrentNet::rbm_learning_rate,
//                  OptionBase::buildoption,
//                  "The learning rate used during RBM contrastive "
//                  "divergence learning phase.\n");

//    declareOption(ol, "rbm_nstages", &DenoisingRecurrentNet::rbm_nstages,
//                  OptionBase::buildoption,
//                  "Number of epochs for rbm phase.\n");

    declareOption(ol, "target_layers_weights", 
                  &DenoisingRecurrentNet::target_layers_weights,
                  OptionBase::buildoption,
                  "The training weight of each target layer.\n");

    declareOption(ol, "end_of_sequence_symbol", 
                  &DenoisingRecurrentNet::end_of_sequence_symbol,
                  OptionBase::buildoption,
                  "Value of the first input component for end-of-sequence "
                  "delimiter.\n");

    // TO DO: input_layer is to be removed eventually because only its size is really used
    declareOption(ol, "input_layer", &DenoisingRecurrentNet::input_layer,
                  OptionBase::buildoption,
                  "The input layer of the model.\n");

    declareOption(ol, "target_layers", &DenoisingRecurrentNet::target_layers,
                  OptionBase::buildoption,
                  "The target layers of the model.\n");

    declareOption(ol, "hidden_layer", &DenoisingRecurrentNet::hidden_layer,
                  OptionBase::buildoption,
                  "The hidden layer of the model.\n");

    declareOption(ol, "hidden_layer2", &DenoisingRecurrentNet::hidden_layer2,
                  OptionBase::buildoption,
                  "The second hidden layer of the model (optional).\n");

    declareOption(ol, "dynamic_connections", 
                  &DenoisingRecurrentNet::dynamic_connections,
                  OptionBase::buildoption,
                  "The RBMConnection between the first hidden layers, "
                  "through time (optional).\n");

    declareOption(ol, "dynamic_reconstruction_connections", 
                  &DenoisingRecurrentNet::dynamic_reconstruction_connections,
                  OptionBase::buildoption,
                  "The RBMConnection for the reconstruction between the hidden layers, "
                  "through time (optional).\n");

    declareOption(ol, "hidden_connections", 
                  &DenoisingRecurrentNet::hidden_connections,
                  OptionBase::buildoption,
                  "The RBMConnection between the first and second "
                  "hidden layers (optional).\n");

    declareOption(ol, "input_connections", 
                  &DenoisingRecurrentNet::input_connections,
                  OptionBase::buildoption,
                  "The RBMConnection from input_layer to hidden_layer.\n");

    declareOption(ol, "target_connections", 
                  &DenoisingRecurrentNet::target_connections,
                  OptionBase::buildoption,
                  "The RBMConnections from the last hidden layer to each "
                  "target layer.\n");

    declareOption(ol, "target_layers_n_of_target_elements", 
                  &DenoisingRecurrentNet::target_layers_n_of_target_elements,
                  OptionBase::learntoption,
                  "Number of elements in the target part of a VMatrix associated\n"
                  "to each target layer.\n");

    declareOption(ol, "input_symbol_sizes", 
                  &DenoisingRecurrentNet::input_symbol_sizes,
                  OptionBase::learntoption,
                  "Number of symbols for each symbolic field of train_set.\n");

    declareOption(ol, "target_symbol_sizes", 
                  &DenoisingRecurrentNet::target_symbol_sizes,
                  OptionBase::learntoption,
                  "Number of symbols for each symbolic field of train_set.\n");

    declareOption(ol, "encoding", 
                  &DenoisingRecurrentNet::encoding,
                  OptionBase::buildoption,
                  "Chooses what type of encoding to apply to an input sequence.\n"
                  "Possibilities: timeframe, note_duration, note_octav_duration, raw_masked_supervised");

    declareOption(ol, "input_window_size", 
                  &DenoisingRecurrentNet::input_window_size,
                  OptionBase::buildoption,
                  "How many time steps to present as input.\n"
                  "If it is 0, then all layers are essentially ignored, and instead an unconditional predictor is trained.\n"
                  "This option is ignored when mode is raw_masked_supervised,\n"
                  "since in this mode the full expanded and preprocessed input and target are given explicitly."
        );

    declareOption(ol, "tied_input_reconstruction_weights", 
                  &DenoisingRecurrentNet::tied_input_reconstruction_weights,
                  OptionBase::buildoption,
                  "Whether to tie the input reconstruction weights "
                  "(boolean, yes or no).\n");

    declareOption(ol, "input_noise_prob", 
                  &DenoisingRecurrentNet::input_noise_prob,
                  OptionBase::buildoption,
                  "Probability, for each neuron of each input, of being set to zero.\n");

    declareOption(ol, "input_reconstruction_lr", 
                  &DenoisingRecurrentNet::input_reconstruction_lr,
                  OptionBase::buildoption,
                  "The learning rate used for the input reconstruction.\n");

    declareOption(ol, "hidden_noise_prob", 
                  &DenoisingRecurrentNet::hidden_noise_prob,
                  OptionBase::buildoption,
                  "Probability, for each neuron of each hidden layer, of being set to zero.\n");

    declareOption(ol, "hidden_reconstruction_lr", 
                  &DenoisingRecurrentNet::hidden_reconstruction_lr,
                  OptionBase::buildoption,
                  "The learning rate used for the dynamic reconstruction through time.\n");

    declareOption(ol, "tied_hidden_reconstruction_weights", 
                  &DenoisingRecurrentNet::tied_hidden_reconstruction_weights,
                  OptionBase::buildoption,
                  "Whether to tie the dynamic reconstruction weights "
                  "(boolean, yes or no).\n");

    declareOption(ol, "noisy_recurrent_lr", 
                  &DenoisingRecurrentNet::noisy_recurrent_lr,
                  OptionBase::buildoption,
                  "The learning rate used in the noisy recurrent phase for the input reconstruction\n");

    declareOption(ol, "dynamic_gradient_scale_factor", 
                  &DenoisingRecurrentNet::dynamic_gradient_scale_factor,
                  OptionBase::buildoption,
                  "The scale factor of the learning rate used in the noisy recurrent phase for the dynamic hidden reconstruction\n");

    declareOption(ol, "recurrent_lr", 
                  &DenoisingRecurrentNet::recurrent_lr,
                  OptionBase::buildoption,
                  "The learning rate used in the fine tuning phase\n");

    declareOption(ol, "mean_encoded_vec", &DenoisingRecurrentNet::mean_encoded_vec,
                  OptionBase::learntoption,
                  "When training with trainUnconditionalPredictor (if input_window_size==0), this is simply used to store the average encoded frame");

    declareOption(ol, "prediction_cost_weight", &DenoisingRecurrentNet::prediction_cost_weight,
                  OptionBase::learntoption,
                  "The training weight for the target prediction");

    declareOption(ol, "input_reconstruction_cost_weight", &DenoisingRecurrentNet::input_reconstruction_cost_weight,
                  OptionBase::learntoption,
                  "The training weight for the input reconstruction");

    declareOption(ol, "hidden_reconstruction_cost_weight", &DenoisingRecurrentNet::hidden_reconstruction_cost_weight,
                  OptionBase::learntoption,
                  "The training weight for the hidden reconstruction");

    declareOption(ol, "nb_stage_reconstruction", &DenoisingRecurrentNet::nb_stage_reconstruction,
                  OptionBase::learntoption,
                  "The number of stages for the reconstruction phase");

    declareOption(ol, "nb_stage_target", &DenoisingRecurrentNet::nb_stage_target,
                  OptionBase::learntoption,
                  "The number of stages for the target phase");

    declareOption(ol, "L1_penalty_factor",
                  &DenoisingRecurrentNet::L1_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of L1 regularization term, i.e.\n"
                  "minimize L1_penalty_factor * sum_{ij} |weights(i,j)| "
                  "during training.\n");

    declareOption(ol, "L2_penalty_factor",
                  &DenoisingRecurrentNet::L2_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of L2 regularization term, i.e.\n"
                  "minimize 0.5 * L2_penalty_factor * sum_{ij} weights(i,j)^2 "
                  "during training.\n");

 /*
    declareOption(ol, "", &DenoisingRecurrentNet::,
                  OptionBase::learntoption,
                  "");
     */

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

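// Checks option consistency and wires the network together: shares the
// learner's random generator with all sub-modules, parses symbolic
// input/target fields into one-hot sizes, and sets the down/up sizes of all
// RBMConnections before building them.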
void DenoisingRecurrentNet::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of
    // ###    all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning"
    // ###    options have been modified.
    // ### You should assume that the parent class' build_() has already been
    // ### called.

    MODULE_LOG << "build_() called" << endl;

    if(train_set)
    {
        use_target_layers_masks = (encoding=="raw_masked_supervised");

        PLASSERT( target_layers_weights.length() == target_layers.length() );
        PLASSERT( target_connections.length() == target_layers.length() );
        PLASSERT( target_layers.length() > 0 );
        PLASSERT( input_layer );
        PLASSERT( hidden_layer );
        PLASSERT( input_connections );

        // Parsing symbols in input
        int input_layer_size = 0;
        input_symbol_sizes.resize(0);
        PP<Dictionary> dict;
        int inputsize_without_masks = inputsize() 
            - ( use_target_layers_masks ? targetsize() : 0 );
        for(int i=0; i<inputsize_without_masks; i++)
        {
            dict = train_set->getDictionary(i);
            if(dict)
            {
                if( dict->size() == 0 )
                    PLERROR("DenoisingRecurrentNet::build_(): dictionary "
                        "of field %d is empty", i);
                input_symbol_sizes.push_back(dict->size());
                // Adjust size to include one-hot vector
                input_layer_size += dict->size();
            }
            else
            {
                input_symbol_sizes.push_back(-1);
                input_layer_size++;
            }
        }
/*
        if( input_layer->size != input_layer_size )
            PLERROR("DenoisingRecurrentNet::build_(): input_layer->size %d "
                    "should be %d", input_layer->size, input_layer_size);
*/
        // Parsing symbols in target
        int tar_layer = 0;
        int tar_layer_size = 0;
        target_symbol_sizes.resize(target_layers.length());
        for( tar_layer=0; tar_layer<target_layers.length(); tar_layer++ )
            target_symbol_sizes[tar_layer].resize(0);

        target_layers_n_of_target_elements.resize( targetsize() );
        target_layers_n_of_target_elements.clear();
        tar_layer = 0;
        for( int tar=0; tar<targetsize(); tar++)
        {
            // >= (rather than >) so that tar_layer never indexes past the end
            if( tar_layer >= target_layers.length() )
                PLERROR("DenoisingRecurrentNet::build_(): target layers "
                        "do not cover all targets.");            

            dict = train_set->getDictionary(tar+inputsize());
            if(dict)
            {
                if( use_target_layers_masks )
                    PLERROR("DenoisingRecurrentNet::build_(): masks for "
                            "symbolic targets are not implemented.");
                if( dict->size() == 0 )
                    PLERROR("DenoisingRecurrentNet::build_(): dictionary "
                            "of field %d is empty", tar);

                target_symbol_sizes[tar_layer].push_back(dict->size());
                target_layers_n_of_target_elements[tar_layer]++;
                tar_layer_size += dict->size();
            }
            else
            {
                target_symbol_sizes[tar_layer].push_back(-1);
                target_layers_n_of_target_elements[tar_layer]++;
                tar_layer_size++;
            }

            if( target_layers[tar_layer]->size == tar_layer_size )
            {
                tar_layer++;
                tar_layer_size = 0;
            }
        }

        //if( tar_layer != target_layers.length() )
        //    PLERROR("DenoisingRecurrentNet::build_(): target layers "
        //            "does not cover all targets.");


        // Building weights and layers
        if( !input_layer->random_gen )
        {
            input_layer->random_gen = random_gen;
            input_layer->forget();
        }

        if( !hidden_layer->random_gen )
        {
            hidden_layer->random_gen = random_gen;
            hidden_layer->forget();
        }

        input_connections->down_size = input_layer->size;
        input_connections->up_size = hidden_layer->size;
        if( !input_connections->random_gen )
        {
            input_connections->random_gen = random_gen;
            input_connections->forget();
        }
        input_connections->build();

        if( dynamic_connections )
        {
            dynamic_connections->down_size = hidden_layer->size;
            dynamic_connections->up_size = hidden_layer->size;
            if( !dynamic_connections->random_gen )
            {
                dynamic_connections->random_gen = random_gen;
                dynamic_connections->forget();
            }
            dynamic_connections->build();
        }

        if( dynamic_reconstruction_connections )
        {
            dynamic_reconstruction_connections->down_size = hidden_layer->size;
            dynamic_reconstruction_connections->up_size = hidden_layer->size;
            if( !dynamic_reconstruction_connections->random_gen )
            {
                dynamic_reconstruction_connections->random_gen = random_gen;
                dynamic_reconstruction_connections->forget();
            }
            dynamic_reconstruction_connections->build();
        }

        if( hidden_layer2 )
        {
            if( !hidden_layer2->random_gen )
            {
                hidden_layer2->random_gen = random_gen;
                hidden_layer2->forget();
            }

            PLASSERT( hidden_connections );

            hidden_connections->down_size = hidden_layer->size;
            hidden_connections->up_size = hidden_layer2->size;
            if( !hidden_connections->random_gen )
            {
                hidden_connections->random_gen = random_gen;
                hidden_connections->forget();
            }
            hidden_connections->build();
        }

        for( int tar_layer = 0; tar_layer < target_layers.length(); tar_layer++ )
        {
            PLASSERT( target_layers[tar_layer] );
            PLASSERT( target_connections[tar_layer] );

            if( !target_layers[tar_layer]->random_gen )
            {
                target_layers[tar_layer]->random_gen = random_gen;
                target_layers[tar_layer]->forget();
            }

            if( hidden_layer2 )
                target_connections[tar_layer]->down_size = hidden_layer2->size;
            else
                target_connections[tar_layer]->down_size = hidden_layer->size;

            target_connections[tar_layer]->up_size = target_layers[tar_layer]->size;
            if( !target_connections[tar_layer]->random_gen )
            {
                target_connections[tar_layer]->random_gen = random_gen;
                target_connections[tar_layer]->forget();
            }
            target_connections[tar_layer]->build();
        }
    }
}

// ### Nothing to add here, simply calls build_
void DenoisingRecurrentNet::build()
{
    inherited::build();
    build_();
}


void DenoisingRecurrentNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // Public fields
    deepCopyField( target_layers_weights, copies );
    deepCopyField( input_layer, copies );
    deepCopyField( target_layers, copies );
    deepCopyField( hidden_layer, copies );
    deepCopyField( hidden_layer2, copies );
    deepCopyField( dynamic_connections, copies );
    deepCopyField( dynamic_reconstruction_connections, copies );
    deepCopyField( hidden_connections, copies );
    deepCopyField( input_connections, copies );
    deepCopyField( target_connections, copies );
    deepCopyField( target_layers_n_of_target_elements, copies );
    deepCopyField( input_symbol_sizes, copies );
    deepCopyField( target_symbol_sizes, copies );
    deepCopyField( mean_encoded_vec, copies );
    deepCopyField( input_reconstruction_bias, copies );
    deepCopyField( hidden_reconstruction_bias, copies );
    deepCopyField( hidden_reconstruction_bias2, copies );

    // Protected fields
    deepCopyField( data, copies );
    deepCopyField( acc_target_connections_gr, copies );
    deepCopyField( acc_input_connections_gr, copies );
    deepCopyField( acc_dynamic_connections_gr, copies );
    deepCopyField( acc_reconstruction_dynamic_connections_gr, copies );
    deepCopyField( acc_target_bias_gr, copies );
    deepCopyField( acc_hidden_bias_gr, copies );
    deepCopyField( acc_recons_bias_gr, copies );
    deepCopyField( bias_gradient, copies );
    deepCopyField( visi_bias_gradient, copies );
    deepCopyField( hidden_gradient, copies );
    deepCopyField( hidden_temporal_gradient, copies );
    deepCopyField( hidden_list, copies );
    deepCopyField( hidden_act_no_bias_list, copies );
    deepCopyField( hidden2_list, copies );
    deepCopyField( hidden2_act_no_bias_list, copies );
    deepCopyField( target_prediction_list, copies );
    deepCopyField( target_prediction_act_no_bias_list, copies );
    deepCopyField( input_list, copies );
    deepCopyField( targets_list, copies );
    deepCopyField( nll_list, copies );
    deepCopyField( masks_list, copies );
    deepCopyField( dynamic_act_no_bias_contribution, copies );
    deepCopyField( trainset_boundaries, copies );
    deepCopyField( testset_boundaries, copies );
    deepCopyField( seq, copies );
    deepCopyField( encoded_seq, copies );
    deepCopyField( clean_encoded_seq, copies );
    deepCopyField( input_reconstruction_prob, copies );
    deepCopyField( hidden_reconstruction_prob, copies );
}


int DenoisingRecurrentNet::outputsize() const
{
    int out_size = 0;
    for( int i=0; i<target_layers.length(); i++ )
        out_size += target_layers[i]->size;
    return out_size;
}

void DenoisingRecurrentNet::forget()
{
    inherited::forget();

    input_layer->forget();
    hidden_layer->forget();
    input_connections->forget();
    if( dynamic_connections )
        dynamic_connections->forget();
    if( dynamic_reconstruction_connections )
        dynamic_reconstruction_connections->forget();
    if( hidden_layer2 )
    {
        hidden_layer2->forget();
        hidden_connections->forget();
    }

    for( int i=0; i<target_layers.length(); i++ )
    {
        target_layers[i]->forget();
        target_connections[i]->forget();
    }

    input_reconstruction_bias.clear();

    stage = 0;
}

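// Degenerate training path used when input_window_size==0 (it only runs for
// stage==0 && nstages==1): averages all encoded frames of the training
// sequences into mean_encoded_vec, which then serves as the unconditional
// prediction.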
void DenoisingRecurrentNet::trainUnconditionalPredictor()
{
    MODULE_LOG << "trainUnconditionalPredictor() called " << endl;

    // reserve memory for sequences
    seq.resize(5000,2); // contains the current sequence

    // real weight = 0; // Unused
    Vec train_costs( getTrainCostNames().length() );
    train_costs.fill(-1);

    if( !initTrain() )
    {
        MODULE_LOG << "train() aborted" << endl;
        return;
    }

    if( stage==0 && nstages==1 )
    {        
        // clear stats of previous epoch
        train_stats->forget();

        int nvecs = 0;
        int nseq = nSequences();        

        ProgressBar* pb = 0;
        if( report_progress )
            pb = new ProgressBar( "Sequences ", nseq );
        for(int i=0; i<nseq; i++)
        {
            getSequence(i, seq);
            encodeSequenceAndPopulateLists(seq, false);
            if(i==0)
            {
                mean_encoded_vec.resize(encoded_seq.width());
                mean_encoded_vec.clear();
            }
            for(int t=0; t<encoded_seq.length(); t++)
            {
                mean_encoded_vec += encoded_seq(t);                
                nvecs++;
            }
        }
        mean_encoded_vec *= 1./nvecs;            
        train_stats->update(train_costs);
        train_stats->finalize();            

        if( pb )
        {
            delete pb;
            pb = 0;
        }
        ++stage;
    }
}

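// Main training loop. For each stage (epoch) and each training sequence:
//   - once past nb_stage_reconstruction, do a recurrent (fine-tuning) pass
//     with learning rate recurrent_lr;
//   - otherwise do the greedy denoising phases: hidden reconstruction
//     (hidden_reconstruction_lr), and input reconstruction on a noisy
//     re-encoding of the sequence (input_reconstruction_lr);
//   - optionally do a noisy recurrent pass (noisy_recurrent_lr).
// Costs are accumulated in train_costs / train_n_items and averaged per stage.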
void DenoisingRecurrentNet::train()
{
    if(input_window_size==0)
    {
        trainUnconditionalPredictor();
        return;
    }

    MODULE_LOG << "train() called " << endl;

    // reserve memory for sequences
    seq.resize(5000,2); // contains the current sequence

    // real weight = 0; // Unused
    Vec train_costs( getTrainCostNames().length() );
    train_costs.clear();
    Vec train_n_items( getTrainCostNames().length() );

    if( !initTrain() )
    {
        MODULE_LOG << "train() aborted" << endl;
        return;
    }

    ProgressBar* pb = 0;

    // clear stats of previous epoch
    train_stats->forget();

    /***** Recurrent phase *****/
    if( stage >= nstages )
        return;

    if( stage < nstages )
    {        
        MODULE_LOG << "Training the whole model" << endl;

        int init_stage = stage;
        //int end_stage = max(0,nstages-(rbm_nstages + dynamic_nstages));
        int end_stage = nstages;

        MODULE_LOG << "  stage = " << stage << endl;
        MODULE_LOG << "  end_stage = " << end_stage << endl;
        MODULE_LOG << "  input_noise_prob = " << input_noise_prob << endl;
        MODULE_LOG << "  input_reconstruction_lr = " << input_reconstruction_lr << endl;
        MODULE_LOG << "  hidden_noise_prob = " << hidden_noise_prob << endl;
        MODULE_LOG << "  hidden_reconstruction_lr = " << hidden_reconstruction_lr << endl;
        MODULE_LOG << "  noisy_recurrent_lr = " << noisy_recurrent_lr << endl;
        MODULE_LOG << "  dynamic_gradient_scale_factor = " << dynamic_gradient_scale_factor << endl;
        MODULE_LOG << "  recurrent_lr = " << recurrent_lr << endl;

        if( report_progress && stage < end_stage )
            pb = new ProgressBar( "Recurrent training phase of "+classname(),
                                  end_stage - init_stage );

        int nCost = 2;
        train_costs.resize(train_costs.length() + nCost);
        train_n_items.resize(train_n_items.length() + nCost);
        while(stage < end_stage)
        {
            train_costs.clear();
            train_n_items.clear();

            int nseq = nSequences();
            for(int i=0; i<nseq; i++)
            {
                if(input_noise_prob!=0)
                    noise = true;
                else
                    noise = false;

                getSequence(i, seq);
                encodeSequenceAndPopulateLists(seq, false);

                //bool corrupt_input = false;//input_noise_prob!=0 && (noisy_recurrent_lr!=0 || input_reconstruction_lr!=0);

                //clean_encoded_seq.resize(encoded_seq.length(), encoded_seq.width());
                //clean_encoded_seq << encoded_seq;

                //if(corrupt_input)  // WARNING: encoded_sequence will be dirty!!!!
                //    inject_zero_forcing_noise(encoded_seq, input_noise_prob);

                // recurrent no noise phase
                if(stage >= nb_stage_reconstruction){
                    if(recurrent_lr!=0)
                    {
                        setLearningRate( recurrent_lr );                    
                        recurrentFprop(train_costs, train_n_items);
                        recurrentUpdate(0,0,1, prediction_cost_weight,1, train_costs, train_n_items );
                    }
                }

                if(stage < nb_stage_reconstruction || nb_stage_reconstruction == 0 ){

                    // greedy phase hidden
                    if(hidden_reconstruction_lr!=0){
                        setLearningRate( hidden_reconstruction_lr );
                        recurrentFprop(train_costs, train_n_items, true);
                        recurrentUpdate(0, hidden_reconstruction_cost_weight, 1, 0,1, train_costs, train_n_items );
                    }

                    /*if(recurrent_lr!=0)
                      {                 
                      setLearningRate( recurrent_lr );                    
                      recurrentFprop(train_costs, train_n_items);
                      //recurrentUpdate(0,0,1, prediction_cost_weight,0, train_costs, train_n_items );
                      recurrentUpdate(0,0,0, prediction_cost_weight,0, train_costs, train_n_items );
                      }*/

                    // greedy phase input
                    if(input_reconstruction_lr!=0){
                        if (noise)
                            encodeSequenceAndPopulateLists(seq, true);
                        setLearningRate( input_reconstruction_lr );
                        recurrentFprop(train_costs, train_n_items, false);
                        if (noise)
                            encodeSequenceAndPopulateLists(seq, false);
                        recurrentUpdate(input_reconstruction_cost_weight, 0, 1, 0,1, train_costs, train_n_items );
                    }
                }

                // recurrent no noise phase
                /*if(stage>=nb_stage_reconstruction && stage<nb_stage_target+nb_stage_reconstruction){
                    if(recurrent_lr!=0)
                    {
                        if(noise) // need to recover the clean sequence                        
                            encoded_seq << clean_encoded_seq;                  
                        setLearningRate( recurrent_lr );                    
                        recurrentFprop(train_costs, train_n_items);
                        recurrentUpdate(0,0,1, prediction_cost_weight,0, train_costs, train_n_items );
                    }
                }*/

                // recurrent noisy phase
                if(noisy_recurrent_lr!=0)
                {
                    setLearningRate( noisy_recurrent_lr );
                    recurrentFprop(train_costs, train_n_items);
                    recurrentUpdate(input_reconstruction_cost_weight, hidden_reconstruction_cost_weight, 1,1, prediction_cost_weight, train_costs, train_n_items );
                }
            }
            noise = false;
            if( pb )
                pb->update( stage + 1 - init_stage );

            //double totalCosts = 0;
            for(int i=0; i<train_costs.length(); i++)
            {
                if (train_costs[i] <= 0 || train_n_items[i] <= 0 ){
                    train_costs[i] = 1;
                    train_n_items[i] = 1; 
                }

                if (i < target_layers_weights.length()){
                    if( !fast_exact_is_equal(target_layers_weights[i],0) ){
                        train_costs[i] /= train_n_items[i];
                        //totalCosts += train_costs[i]*target_layers_weights[i];
                    }
                    else
                        train_costs[i] = MISSING_VALUE;
                }

                if (i == train_costs.length()-nCost ){
                    train_costs[i] /= train_n_items[i];
                    //totalCosts += train_costs[i]*input_reconstruction_cost_weight;
                }
                else if (i == train_costs.length()-1)
                    train_costs[i] /= train_n_items[i];
            }

            if(verbosity>0)
                cout << "mean costs at stage " << stage << 
                    " = " << train_costs << endl;
            stage++;
            train_stats->update(train_costs);
        }

        if( pb )
        {
            delete pb;
            pb = 0;
        }
    }

    train_stats->finalize();        
}


void DenoisingRecurrentNet::encodeSequenceAndPopulateLists(Mat seq, bool doNoise) const
{
    if(encoding=="raw_masked_supervised") // old already encoded format (for backward testing)
        splitRawMaskedSupervisedSequence(seq, doNoise);
    else if(encoding=="generic")
        encode_artificialData(seq);
    else if(encoding=="note_octav_duration")
        encodeAndCreateSupervisedSequence(seq);
    else if(encoding=="diffNote_duration")
        encodeAndCreateSupervisedSequence2(seq);
}

// encodes seq, then populates: input_list, targets_list, masks_list
void DenoisingRecurrentNet::encodeAndCreateSupervisedSequence2(Mat seq) const
{
    if(use_target_layers_masks)
        PLERROR("Bug: use_target_layers_masks is expected to be false (no masks) when in encodeAndCreateSupervisedSequence2");

    encodeSequence(seq, encoded_seq);
    // now work with encoded_seq
    int l = encoded_seq.length();
    resize_lists(l-input_window_size);

    int ntargets = target_layers.length();
    targets_list.resize(ntargets);
    for(int tar=0; tar<ntargets; tar++)
    {
        int targsize = target_layers[tar]->size;
        targets_list[tar].resize(l-input_window_size, targsize);
    }
    int startTar;
    for(int t=input_window_size; t<l; t++)
    {
        input_list[t-input_window_size] = encoded_seq.subMatRows(t-input_window_size,input_window_size).toVec();
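        // NOTE: the target part of an encoded frame is assumed to start at a
        // hard-coded offset of 43 for this (diffNote_duration) encoding.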
        startTar = 43;
        for(int tar=0; tar<ntargets; tar++)
        {
            int targsize = target_layers[tar]->size;
            targets_list[tar](t-input_window_size) << encoded_seq(t).subVec(startTar,targsize);
            startTar += targsize;
        }
    }
}

// encodes seq, then populates: input_list, targets_list, masks_list
void DenoisingRecurrentNet::encodeAndCreateSupervisedSequence(Mat seq) const
{
    if(use_target_layers_masks)
        PLERROR("Bug: use_target_layers_masks is expected to be false (no masks) when in encodeAndCreateSupervisedSequence");

    encodeSequence(seq, encoded_seq);
    // now work with encoded_seq
    int l = encoded_seq.length();
    resize_lists(l-input_window_size);

    int ntargets = target_layers.length();
    targets_list.resize(ntargets);
    //Mat targets = targets_list[0];
    //targets.resize(l, encoded_seq.width());
    targets_list[0].resize(l-input_window_size, encoded_seq.width());

    for(int t=input_window_size; t<l; t++)
    {
        input_list[t-input_window_size] = encoded_seq.subMatRows(t-input_window_size,input_window_size).toVec();
        //perr << "t-input_window_size = " << endl;
        //perr << "subMat:" << endl << encoded_seq.subMatRows(t-input_window_size,input_window_size) << endl;
        //perr << "toVec:" << endl << encoded_seq.subMatRows(t-input_window_size,input_window_size).toVec() << endl;
        //perr << "input_list:" << endl << input_list[t-input_window_size] << endl;
        // target is copied so that when adding noise to input, it doesn't modify target 
        //targets(t-input_window_size) << encoded_seq(t);
        targets_list[0](t-input_window_size) << encoded_seq(t);
    }
}


// For the (backward testing) raw_masked_supervised case. Populates: input_list, targets_list, masks_list
void DenoisingRecurrentNet::splitRawMaskedSupervisedSequence(Mat seq, bool doNoise) const
{
    int l = seq.length();
    resize_lists(l);
    int inputsize_without_masks = inputsize()-targetsize();
    Mat input_part;
    input_part.resize(seq.length(),inputsize_without_masks);
    input_part << seq.subMatColumns(0,inputsize_without_masks);
    Mat mask_part = seq.subMatColumns(inputsize_without_masks, targetsize());
    Mat target_part = seq.subMatColumns(inputsize_without_masks+targetsize(), targetsize());

    if(doNoise)
        inject_zero_forcing_noise(input_part, input_noise_prob);

    for(int i=0; i<l; i++)
        input_list[i] = input_part(i);

    int ntargets = target_layers.length();
    targets_list.resize(ntargets);
    masks_list.resize(ntargets);
    int startcol = 0; // starting column of next target in target_part and mask_part
    for(int k=0; k<ntargets; k++)
    {
        int targsize = target_layers[k]->size;
        targets_list[k] = target_part.subMatColumns(startcol, targsize);
        masks_list[k] = mask_part.subMatColumns(startcol, targsize);
        startcol += targsize;
    }

    encoded_seq.resize(input_part.length(), input_part.width());
    encoded_seq << input_part;
}

void DenoisingRecurrentNet::encode_artificialData(Mat seq) const
{
    int l = seq.length();
    int theInputsize = inputsize();
    int theTargetsize = targetsize();
    resize_lists(l);
    //int inputsize_without_masks = inputsize-targetsize;
    Mat input_part;
    input_part.resize(seq.length(),theInputsize);
    input_part << seq.subMatColumns(0,theInputsize);
    //Mat mask_part = seq.subMatColumns(inputsize, targetsize);
    Mat target_part = seq.subMatColumns(theInputsize, theTargetsize);

    //if(doNoise)
    //    inject_zero_forcing_noise(input_part, input_noise_prob);

    for(int i=0; i<l; i++)
        input_list[i] = input_part(i);

    int ntargets = target_layers.length();
    targets_list.resize(ntargets);
    //masks_list.resize(ntargets);
    int startcol = 0; // starting column of next target in target_part and mask_part
    for(int k=0; k<ntargets; k++)
    {
        int targsize = target_layers[k]->size;
        targets_list[k] = target_part.subMatColumns(startcol, targsize);
        //masks_list[k] = mask_part.subMatColumns(startcol, targsize);
        startcol += targsize;
    }

    encoded_seq.resize(input_part.length(), input_part.width());
    encoded_seq << input_part;

    /*int l = sequence.length();
 
    // reserve one extra bit to mean repetition
    encoded_sequence.resize(l, 1);
    encoded_sequence.clear();

    for(int i=0; i<l; i++)
    {
        int number = int(sequence(i,0));
        encoded_sequence(i,0) = number;        
        }    */
}

void DenoisingRecurrentNet::resize_lists(int l) const
{
    input_list.resize(l);
    hidden_list.resize(l, hidden_layer->size);
    hidden_act_no_bias_list.resize(l, hidden_layer->size);

    if( hidden_layer2 )
    {
        hidden2_list.resize(l, hidden_layer2->size);
        hidden2_act_no_bias_list.resize(l, hidden_layer2->size);
    }

    int ntargets = target_layers.length();
    target_prediction_list.resize( ntargets );
    target_prediction_act_no_bias_list.resize( ntargets );

    for( int tar=0; tar < ntargets; tar++ )
    {
        int targsize = target_layers[tar]->size;
        target_prediction_list[tar].resize(l, targsize);
        target_prediction_act_no_bias_list[tar].resize(l, targsize);
    }

    nll_list.resize(l,ntargets);
}


// must fill train_costs, train_n_items and target_prediction_list[0](t)
void DenoisingRecurrentNet::unconditionalFprop(Vec train_costs, Vec train_n_items) const
{
    int pred_size = mean_encoded_vec.length();
    if(pred_size<=0)
        PLERROR("mean_encoded_vec not properly initialized. Did you call trainUnconditionalPredictor prior to unconditionalFprop?");

    int l = input_list.length();
    int tar = 0;
    train_n_items[tar] += l;
    target_prediction_list[tar].resize(l,pred_size);
    for(int i=0; i<l; i++)
    {        
        Vec target_prediction_i = target_prediction_list[tar](i);
        target_prediction_i << mean_encoded_vec;
        Vec target_vec = targets_list[tar](i);

        /*
        target_layers[tar]->setExpectation(target_prediction_i);
        nll_list(i,tar) = target_layers[tar]->fpropNLL(target_vec); 
        */
        double nllcost = 0;
        for(int k=0; k<target_vec.length(); k++)
            if(target_vec[k]!=0)
                nllcost -= target_vec[k]*safelog(target_prediction_i[k]);
        nll_list(i,tar) = nllcost;

        if (isinf(nll_list(i,tar)))
        {
            PLWARNING("Row %d of sequence of length %d led to an infinite cost",i,l);
            perr << "Problem at positions (vec of length " << target_vec.length() << "): ";
            for(int k=0; k<target_vec.length(); k++)
                if(target_vec[k]!=0 && target_prediction_i[k]==0)
                    perr << k << " ";
            perr << endl;
            // perr << "target_vec = " << target_vec << endl;
            // perr << "target_prediction_i = " << target_prediction_i << endl;
        }
        else
            train_costs[tar] += nll_list(i,tar);
    }
}

// fprop accumulates costs in train_costs and counts in train_n_items
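// At each timestep t, the hidden activation is input_connections applied to
// input_list[t], plus (when useDynamicConnections and t>0) the contribution
// of dynamic_connections applied to the previous hidden state; an optional
// second hidden layer is stacked on top, and each target layer's NLL is
// computed from the last hidden representation.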
void DenoisingRecurrentNet::recurrentFprop(Vec train_costs, Vec train_n_items, bool useDynamicConnections) const
{
    int l = input_list.length();
    int ntargets = target_layers.length();

    for(int i=0; i<l; i++ )
    {
        Vec hidden_act_no_bias_i = hidden_act_no_bias_list(i);
        input_connections->fprop( input_list[i], hidden_act_no_bias_i );
        if(useDynamicConnections){
            if( i > 0 && dynamic_connections )
            {
                Vec hidden_i_prev = hidden_list(i-1);
                dynamic_connections->fprop( hidden_i_prev, dynamic_act_no_bias_contribution );
                hidden_act_no_bias_i += dynamic_act_no_bias_contribution;
            }
        }
        Vec hidden_i = hidden_list(i);
        hidden_layer->fprop( hidden_act_no_bias_i, hidden_i );

        Vec last_hidden = hidden_i;

        if( hidden_layer2 )
        {
            Vec hidden2_i = hidden2_list(i); 
            Vec hidden2_act_no_bias_i = hidden2_act_no_bias_list(i);

            hidden_connections->fprop(hidden_i, hidden2_act_no_bias_i);            
            hidden_layer2->fprop(hidden2_act_no_bias_i, hidden2_i);

            last_hidden = hidden2_i; // last hidden layer vec 
        }

        for( int tar=0; tar < ntargets; tar++ )
        {
            if( !fast_exact_is_equal(target_layers_weights[tar],0) )
            {
                Vec target_prediction_i = target_prediction_list[tar](i);
                Vec target_prediction_act_no_bias_i = target_prediction_act_no_bias_list[tar](i);
                target_connections[tar]->fprop(last_hidden, target_prediction_act_no_bias_i);
                target_layers[tar]->fprop(target_prediction_act_no_bias_i, target_prediction_i);
                if( use_target_layers_masks )
                    target_prediction_i *= masks_list[tar](i);

                target_layers[tar]->activation << target_prediction_act_no_bias_i;
                target_layers[tar]->activation += target_layers[tar]->bias;
                target_layers[tar]->setExpectation(target_prediction_i);

                Vec target_vec = targets_list[tar](i);
                nll_list(i,tar) = target_layers[tar]->fpropNLL(target_vec); 
                train_costs[tar] += nll_list(i,tar);

                // Normalize by the number of things to predict
                if( use_target_layers_masks )
                    train_n_items[tar] += sum(masks_list[tar](i));
                else
                    train_n_items[tar]++;
            }
        }
    }
    //if(noise)
    //  inject_zero_forcing_noise(hidden_list, input_noise_prob);
}


void DenoisingRecurrentNet::applyMultipleSoftmaxToInputWindow(Vec input_reconstruction_activation, Vec input_reconstruction_prob)
{
    if(target_layers.length()!=1)
        PLERROR("applyMultipleSoftmaxToInputWindow is meant to work with a single target layer, an RBMMixedLayer combining different multinomial costs");

    // int nelems = target_layers[0]->size();
    int nelems = target_prediction_list[0].width();

    if(input_reconstruction_activation.length() != input_window_size*nelems)
        PLERROR("Problem: input_reconstruction_activation.length() != input_window_size*nelems  (%d != %d * %d)",input_reconstruction_activation.length(),input_window_size,nelems);

    for(int k=0; k<input_window_size; k++)
    {
        Vec activation_window = input_reconstruction_activation.subVec(k*nelems, nelems);
        Vec prob_window = input_reconstruction_prob.subVec(k*nelems, nelems);
        target_layers[0]->fprop(activation_window, prob_window);
    }    
}

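// The following accessors downcast the generic RBMConnection pointers to
// RBMMatrixConnection so that their raw weight matrices can be read and
// updated directly.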
Mat DenoisingRecurrentNet::getTargetConnectionsWeightMatrix(int tar)
{
    RBMMatrixConnection* conn = dynamic_cast<RBMMatrixConnection*>((RBMConnection*)target_connections[tar]);
    if(conn==0)
        PLERROR("Expecting target connection to be a RBMMatrixConnection. I know it's dirty, but at the point we're at...");
    return conn->weights;
}

Mat DenoisingRecurrentNet::getInputConnectionsWeightMatrix()
{
    RBMMatrixConnection* conn = dynamic_cast<RBMMatrixConnection*>((RBMConnection*)input_connections);
    if(conn==0)
        PLERROR("Expecting input connection to be a RBMMatrixConnection. I know it's dirty, but at the point we're at...");
    return conn->weights;
}

Mat DenoisingRecurrentNet::getDynamicConnectionsWeightMatrix()
{
    RBMMatrixConnection* conn = dynamic_cast<RBMMatrixConnection*>((RBMConnection*)dynamic_connections);
    if(conn==0)
        PLERROR("Expecting dynamic connection to be a RBMMatrixConnection. I know it's dirty, but at the point we're at...");
    return conn->weights;
}

Mat DenoisingRecurrentNet::getDynamicReconstructionConnectionsWeightMatrix()
{
    RBMMatrixConnection* conn = dynamic_cast<RBMMatrixConnection*>((RBMConnection*)dynamic_reconstruction_connections);
    if(conn==0)
        PLERROR("Expecting dynamic reconstruction connection to be a RBMMatrixConnection. I know it's dirty, but at the point we're at...");
    return conn->weights;
}

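// Plain SGD step on a target layer's bias: bias -= lr * grad.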
void DenoisingRecurrentNet::updateTargetLayer( Vec& grad, Vec& bias, real& lr )
{
    real* b = bias.data();
    real* gb = grad.data();
    int size = bias.length();

    for( int i=0 ; i<size ; i++ )
        b[i] -= lr * gb[i];
}

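// Backprop through a linear RBM connection: computes
// input_gradient = weights' * output_gradient (accumulated if requested),
// and accumulates the weight gradient -lr * output_gradient * input' into
// acc_weights_gr instead of updating the weights immediately.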
01259 void DenoisingRecurrentNet::bpropUpdateConnection(const Vec& input, 
01260                                                   const Vec& output,
01261                                                   Vec& input_gradient,
01262                                                   const Vec& output_gradient,
01263                                                   Mat& weights,
01264                                                   Mat& acc_weights_gr,
01265                                                   int& down_size,
01266                                                   int& up_size,
01267                                                   real& lr,
01268                                                   bool accumulate,
01269                                                   bool using_penalty_factor)
01270 {
01271     PLASSERT( input.size() == down_size );
01272     PLASSERT( output.size() == up_size );
01273     PLASSERT( output_gradient.size() == up_size );
01274 
01275     if( accumulate )
01276     {
01277         PLASSERT_MSG( input_gradient.size() == down_size,
01278                       "Cannot resize input_gradient AND accumulate into it" );
01279 
01280         // input_gradient += weights' * output_gradient
01281         transposeProductAcc( input_gradient, weights, output_gradient );
01282     }
01283     else
01284     {
01285         input_gradient.resize( down_size );
01286 
01287         // input_gradient = weights' * output_gradient
01288         transposeProduct( input_gradient, weights, output_gradient );
01289     }
01290 
01291     // weights -= learning_rate * output_gradient * input'
01292     //externalProductScaleAcc( weights, output_gradient, input, -lr );
01293     externalProductScaleAcc( acc_weights_gr, output_gradient, input, -lr );
01294     
01295     if((!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0)) && using_penalty_factor)
01296         applyWeightPenalty(weights, acc_weights_gr, down_size, up_size, lr);
01297 }
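
// In equations, with W the up_size x down_size weight matrix of the connection:
//
//     input_gradient  = W^T * output_gradient              (or += if accumulate)
//     acc_weights_gr += -lr * output_gradient * input^T
//
// The weight step is deliberately accumulated into acc_weights_gr instead of
// being applied to `weights` right away; recurrentUpdate() folds the
// accumulated gradients into the actual matrices via multiplyAcc() once the
// whole backward pass through the sequence is finished.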
01298 
01299 void DenoisingRecurrentNet::bpropUpdateHiddenLayer(const Vec& input, 
01300                                                    const Vec& output,
01301                                                    Vec& input_gradient,
01302                                                    const Vec& output_gradient,                                                
01303                                                    Vec& bias,
01304                                                    real& lr)
01305 {
01306 
01307     int size = bias.length();
01308 
01309     PLASSERT( input.size() == size );
01310     PLASSERT( output.size() == size );
01311     PLASSERT( output_gradient.size() == size );
01312 
01313     
01314     input_gradient.resize( size );
01315     input_gradient.clear();
01316     
01317     
01318     for( int i=0 ; i<size ; i++ )
01319     {
01320         real output_i = output[i];
01322         real in_grad_i = output_i * (1-output_i) * output_gradient[i];
01323         input_gradient[i] += in_grad_i;
01326         // update the bias: bias -= learning_rate * input_gradient
01327         bias[i] -= lr * in_grad_i;
01329     }
01330     
01331     //applyBiasDecay();
01332 }
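
// This is backprop through a sigmoid layer with an immediate bias update:
// since output = sigmoid(activation), d(output_i)/d(activation_i) =
// output_i*(1-output_i), so the activation gradient is
// output_i*(1-output_i)*output_gradient[i] and the bias moves by -lr times it.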
01333 
01334 void DenoisingRecurrentNet::applyWeightPenalty(Mat& weights, Mat& acc_weights_gr, int& down_size, int& up_size, real& lr)
01335 {
01336     // Apply penalty (decay) on weights.
01337     real delta_L1 = lr * L1_penalty_factor;
01338     real delta_L2 = lr * L2_penalty_factor;
01339     /*if (L2_decrease_type == "one_over_t")
01340         delta_L2 /= (1 + L2_decrease_constant * L2_n_updates);
01341     else if (L2_decrease_type == "sigmoid_like")
01342         delta_L2 *= sigmoid((L2_shift - L2_n_updates) * L2_decrease_constant);
01343     else
01344         PLERROR("In RBMMatrixConnection::applyWeightPenalty - Invalid value "
01345                 "for L2_decrease_type: %s", L2_decrease_type.c_str());
01346     */
01347     for( int i=0; i<up_size; i++)
01348     {
01349         real* w_ = weights[i];
01350         real* a_w_g = acc_weights_gr[i];
01351         for( int j=0; j<down_size; j++ )
01352         {
01353             if( delta_L2 != 0. ){
01354                 //w_[j] *= (1 - delta_L2);
01355                 a_w_g[j] -= w_[j]*delta_L2;
01356             }
01357 
01358             if( delta_L1 != 0. )
01359             {
01360                 if( w_[j] > delta_L1 )
01361                     a_w_g[j] -= delta_L1;
01362                 else if( w_[j] < -delta_L1 )
01363                     a_w_g[j] += delta_L1;
01364                 else
01365                     a_w_g[j] = 0.;
01366             }
01367         }
01368     }
01369     /*if (delta_L2 > 0)
01370       L2_n_updates++;*/
01371 }
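
// Both penalties act through the accumulated gradient rather than on the
// weights directly:
//   - L2: a_w_g[j] -= w[j] * (lr * L2_penalty_factor), which once folded in
//     amounts to the usual weight decay  w <- (1 - lr*L2_penalty_factor) * w;
//   - L1: a soft-threshold of width delta_L1 = lr * L1_penalty_factor pushing
//     each weight toward zero by at most delta_L1. Note that the final `else`
//     branch overwrites (zeroes) the accumulated gradient entry for weights
//     already inside [-delta_L1, delta_L1].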
01372 
01373 double DenoisingRecurrentNet::fpropUpdateInputReconstructionFromHidden(Vec hidden, Mat& reconstruction_weights, Mat& acc_weights_gr, Vec& input_reconstruction_bias, Vec& input_reconstruction_prob, 
01374                                                                        Vec clean_input, Vec hidden_gradient, double input_reconstruction_cost_weight, double lr)
01375 {
01376     double cost = fpropInputReconstructionFromHidden(hidden, reconstruction_weights, input_reconstruction_bias, input_reconstruction_prob, clean_input);
01377     updateInputReconstructionFromHidden(hidden, reconstruction_weights, acc_weights_gr, input_reconstruction_bias, input_reconstruction_prob, 
01378                                         clean_input, hidden_gradient, input_reconstruction_cost_weight, lr);
01379     return cost;
01380 }
01381 
01382 
01385 double DenoisingRecurrentNet::fpropInputReconstructionFromHidden(Vec hidden, Mat reconstruction_weights, Vec& reconstruction_bias, Vec& reconstruction_prob, 
01386                                                                  Vec clean_input)
01387 {
01388     // set appropriate sizes
01389     int fullinputlength = clean_input.length();
01390     Vec reconstruction_activation;
01391     if(reconstruction_bias.length()==0)
01392     {
01393         reconstruction_bias.resize(fullinputlength);
01394         reconstruction_bias.clear();
01395     }
01396     reconstruction_activation.resize(fullinputlength);
01397     reconstruction_prob.resize(fullinputlength);
01398 
01399     // predict (denoised) input_reconstruction 
01400     transposeProduct(reconstruction_activation, reconstruction_weights, hidden); 
01401     reconstruction_activation += reconstruction_bias;
01402 
01403     softmax(reconstruction_activation, reconstruction_prob);
01404 
01405         /*for( int j=0 ; j<fullinputlength ; j++ ){
01406         if(clean_input[j]==1 || clean_input[j]==0)
01407             reconstruction_prob[j] = fastsigmoid( reconstruction_activation[j] );
01408         else
01409             reconstruction_prob[j] = reconstruction_activation[j] ;
01410             }*/
01411 
01412     double result_cost = 0;
01413     if(encoding=="raw_masked_supervised") // || encoding=="generic") // complicated input format: binary dims get cross-entropy, real-valued dims squared error
01414     {
01415         double r = 0;
01416         double neg_log_cost = 0; // neg log softmax
01417         for(int k=0; k<reconstruction_prob.length(); k++){
01418             if(clean_input[k]==1 || clean_input[k]==0){
01419                 neg_log_cost -= clean_input[k]*safelog(reconstruction_prob[k]) + (1-clean_input[k])*safelog(1-reconstruction_prob[k]);
01420             }                
01421             else{
01422                 r = reconstruction_prob[k] - clean_input[k];
01423                 neg_log_cost += r*r;
01424             }
01425             
01426             
01427         }
01428         result_cost = neg_log_cost;
01429         
01430         /*real r;
01431         //reconstruction_prob << reconstruction_activation;
01432         for(int i=0; i<reconstruction_activation.length(); i++){
01433             r = reconstruction_activation[i] - clean_input[i];
01434             result_cost += r*r;
01435             }*/
01436     }
01437     else // suppose it's a multiple softmax
01438     {
01439         applyMultipleSoftmaxToInputWindow(reconstruction_activation, reconstruction_prob);
01440     
01441         double neg_log_cost = 0; // neg log softmax
01442         for(int k=0; k<reconstruction_prob.length(); k++)
01443             if(clean_input[k]!=0)
01444                 neg_log_cost -= clean_input[k]*safelog(reconstruction_prob[k]);
01445         result_cost = neg_log_cost;
01446     }
01447     return result_cost;
01448 }
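
// Cost summary for this fprop: with encoding=="raw_masked_supervised", binary
// input dims (clean value 0 or 1) are scored with cross-entropy
//     -[ x*log(p) + (1-x)*log(1-p) ]
// while real-valued dims contribute squared error (p - x)^2, all summed over
// dims. With the music encodings, the activation instead goes through
// applyMultipleSoftmaxToInputWindow and the cost is the negative log-likelihood
// -sum_k x_k * log(p_k) over the active bits.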
01449 
01452 void DenoisingRecurrentNet::updateInputReconstructionFromHidden(Vec hidden, Mat& reconstruction_weights, Mat& acc_weights_gr, Vec& input_reconstruction_bias, Vec input_reconstruction_prob, 
01453                                                                 Vec clean_input, Vec hidden_gradient, double input_reconstruction_cost_weight, double lr)
01454 {
01455     // gradient of -log softmax is just  output_of_softmax - onehot_target
01456     // so let's accumulate this in hidden_gradient
01457     Vec input_reconstruction_activation_grad = input_reconstruction_prob;
01458     input_reconstruction_activation_grad -= clean_input;
01459     input_reconstruction_activation_grad *= input_reconstruction_cost_weight;
01460 
01461     // update bias
01462     multiplyAcc(input_reconstruction_bias, input_reconstruction_activation_grad, -lr);
01463 
01464     // update weight
01465     // NOTE: the reconstruction_weights are tied to (the same as) the input_connections
01466     // weights, whose accumulated update is applied later in recurrentUpdate. Updating them
01467     // directly here would update them twice, so we only accumulate the gradient step. WARNING: this would no longer hold if they were not tied!
01468     externalProductScaleAcc(acc_weights_gr, hidden, input_reconstruction_activation_grad, -lr);
01469 
01470     // accumulate in hidden_gradient
01471     productAcc(hidden_gradient, reconstruction_weights, input_reconstruction_activation_grad);
01472 }
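
// Derivation note: for a softmax output p with one-hot target t and cost
// -sum_k t_k log p_k, the gradient w.r.t. the pre-softmax activation is
// exactly p - t. Scaled by input_reconstruction_cost_weight, this single
// vector (a) steps the reconstruction bias, (b) is accumulated as a weight
// gradient in acc_weights_gr, and (c) is backpropagated into hidden_gradient
// through the (tied) reconstruction weights.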
01473 
01474 double DenoisingRecurrentNet::fpropHiddenReconstructionFromLastHidden2(Vec theInput, 
01475                                                                       Vec hidden, 
01476                                                                       Mat reconstruction_weights, 
01477                                                                       Mat& acc_weights_gr, 
01478                                                                       Vec& reconstruction_bias, 
01479                                                                       Vec& reconstruction_bias2, 
01480                                                                       Vec hidden_reconstruction_activation_grad, 
01481                                                                       Vec& reconstruction_prob, 
01482                                                                       Vec hidden_target, 
01483                                                                       Vec hidden_gradient, 
01484                                                                       double hidden_reconstruction_cost_weight, 
01485                                                                       double lr)
01486 {
01487     // set appropriate sizes
01488     int fullhiddenlength = hidden_target.length();
01489     Vec reconstruction_activation;
01490     Vec reconstruction_activation2;
01491     Vec reconstruction_prob2;
01492     Vec hidden_act_no_bias;
01493     Vec hidden_exp;
01494     Vec dynamic_act_no_bias_contribution;
01495     Vec hidden_gradient2;
01496     if(reconstruction_bias.length()==0)
01497     {
01498         reconstruction_bias.resize(fullhiddenlength);
01499         reconstruction_bias.clear();
01500     }
01501     if(reconstruction_bias2.length()==0)
01502     {
01503         reconstruction_bias2.resize(fullhiddenlength);
01504         reconstruction_bias2.clear();
01505     }
01506     reconstruction_prob2.resize(fullhiddenlength);
01507     reconstruction_activation.resize(fullhiddenlength);
01508     reconstruction_activation2.resize(fullhiddenlength);
01509     reconstruction_prob.resize(fullhiddenlength);
01510 
01511    
01512     hidden_act_no_bias.resize(fullhiddenlength);
01513     hidden_exp.resize(fullhiddenlength);
01514     dynamic_act_no_bias_contribution.resize(fullhiddenlength);
01515     hidden_gradient2.resize(fullhiddenlength);
01516     
01517 
01518     // predict (denoised) hidden reconstruction
01519     transposeProduct(reconstruction_activation, reconstruction_weights, hidden); // dynamic matrix tied
01520     //product(reconstruction_activation, reconstruction_weights, hidden); // dynamic matrix not tied
01521     reconstruction_activation += reconstruction_bias;
01522 
01523     for( int j=0 ; j<fullhiddenlength ; j++ )
01524         reconstruction_prob[j] = fastsigmoid( reconstruction_activation[j] );
01525 
01526 
01527 
01528     // second reconstruction pass through the same tied weights
01529     transposeProduct(reconstruction_activation2, reconstruction_weights, reconstruction_prob); // dynamic matrix tied
01530     reconstruction_activation2 += reconstruction_bias2;
01531 
01532     for( int j=0 ; j<fullhiddenlength ; j++ )
01533         reconstruction_prob2[j] = fastsigmoid( reconstruction_activation2[j] );
01534 
01535 
01536     //hidden_layer->fprop(reconstruction_activation, reconstruction_prob);
01537 
01538     /********************************************************************************/
01539     hidden_reconstruction_activation_grad.resize(reconstruction_prob.size());
01540     hidden_reconstruction_activation_grad << reconstruction_prob2;
01541     hidden_reconstruction_activation_grad -= hidden_target;
01542     hidden_reconstruction_activation_grad *= hidden_reconstruction_cost_weight;
01543     
01544 
01545     productAcc(hidden_gradient2, reconstruction_weights, hidden_reconstruction_activation_grad); // dynamic matrix tied
01546     //transposeProductAcc(hidden_gradient, reconstruction_weights, hidden_reconstruction_activation_grad); // dynamic matrix not tied
01547     
01548     //update bias
01549     multiplyAcc(reconstruction_bias2, hidden_reconstruction_activation_grad, -lr);
01550     // update weight
01551     externalProductScaleAcc(acc_weights_gr, hidden, hidden_reconstruction_activation_grad, -lr); // dynamic matrix tied
01552     //externalProductScaleAcc(acc_weights_gr, hidden_reconstruction_activation_grad, hidden, -lr); // dynamic matrix not tied
01553     
01554     hidden_reconstruction_activation_grad.clear();
01555 
01556     //update bias
01557     for( int i=0 ; i<fullhiddenlength ; i++ )
01558     {
01559         real in_grad_i;
01560         in_grad_i = reconstruction_prob[i] * (1-reconstruction_prob[i]) * hidden_gradient2[i];
01561         hidden_reconstruction_activation_grad[i] += in_grad_i;
01562         
01563        
01564         // update the bias: bias -= learning_rate * input_gradient
01565         reconstruction_bias[i] -= lr * in_grad_i;
01566         
01567     }
01568 
01569     productAcc(hidden_gradient, reconstruction_weights, hidden_reconstruction_activation_grad); // dynamic matrix tied
01570 
01571     // update weight
01572     externalProductScaleAcc(acc_weights_gr, hidden, hidden_reconstruction_activation_grad, -lr); // dynamic matrix tied
01573     
01574   
01575     //update bias2
01576     //multiplyAcc(reconstruction_bias2, hidden_gradient, -lr);
01577     /********************************************************************************/
01578     // Vec hidden_reconstruction_activation_grad;
01579     /*hidden_reconstruction_activation_grad.clear();
01580     for(int k=0; k<reconstruction_prob.length(); k++){
01581         //    hidden_reconstruction_activation_grad[k] = safelog(1-reconstruction_prob[k]) - safelog(reconstruction_prob[k]);
01582         hidden_reconstruction_activation_grad[k] = - reconstruction_activation[k];
01583         }*/
01584 
01585     double result_cost = 0;
01586     double neg_log_cost = 0; // neg log softmax
01587     for(int k=0; k<reconstruction_prob.length(); k++){
01588         //if(hidden_target[k]!=0)
01589         neg_log_cost -= hidden_target[k]*safelog(reconstruction_prob[k]) + (1-hidden_target[k])*safelog(1-reconstruction_prob[k]);
01590     }
01591     result_cost = neg_log_cost;
01592     
01593     return result_cost;
01594 }
01595 
01596 double DenoisingRecurrentNet::fpropHiddenReconstructionFromLastHidden(Vec theInput, 
01597                                                                       Vec hidden, 
01598                                                                       Mat reconstruction_weights, 
01599                                                                       Mat& acc_weights_gr, 
01600                                                                       Vec& reconstruction_bias, 
01601                                                                       Vec& reconstruction_bias2, 
01602                                                                       Vec hidden_reconstruction_activation_grad, 
01603                                                                       Vec& reconstruction_prob, 
01604                                                                       Vec hidden_target, 
01605                                                                       Vec hidden_gradient, 
01606                                                                       double hidden_reconstruction_cost_weight, 
01607                                                                       double lr)
01608 {
01609     // set appropriate sizes
01610     int fullhiddenlength = hidden_target.length();
01611     Vec reconstruction_activation;
01612     Vec hidden_input_noise;
01613     Vec hidden_fprop_noise;
01614     Vec hidden_act_no_bias;
01615     Vec hidden_exp;
01616     Vec dynamic_act_no_bias_contribution;
01617     if(reconstruction_bias.length()==0)
01618     {
01619         reconstruction_bias.resize(fullhiddenlength);
01620         reconstruction_bias.clear();
01621     }
01622     if(reconstruction_bias2.length()==0)
01623     {
01624         reconstruction_bias2.resize(fullhiddenlength);
01625         reconstruction_bias2.clear();
01626     }
01627     reconstruction_activation.resize(fullhiddenlength);
01628     reconstruction_prob.resize(fullhiddenlength);
01629 
01630     hidden_fprop_noise.resize(fullhiddenlength);
01631     hidden_input_noise.resize(fullhiddenlength);
01632     hidden_act_no_bias.resize(fullhiddenlength);
01633     hidden_exp.resize(fullhiddenlength);
01634     dynamic_act_no_bias_contribution.resize(fullhiddenlength);
01635 
01636     input_connections->fprop( theInput, hidden_act_no_bias);
01637     hidden_input_noise << hidden_target;
01638     inject_zero_forcing_noise(hidden_input_noise, input_noise_prob);
01639     dynamic_connections->fprop(hidden_input_noise, dynamic_act_no_bias_contribution );
01640     hidden_act_no_bias += dynamic_act_no_bias_contribution;
01641     hidden_layer->fprop( hidden_act_no_bias, hidden_exp);
01642     //hidden_act_no_bias += reconstruction_bias2;
01643     //for( int j=0 ; j<fullhiddenlength ; j++ )
01644     //    hidden_fprop_noise[j] = fastsigmoid(hidden_act_no_bias[j] );
01645 
01646     // predict (denoised) hidden reconstruction
01647     transposeProduct(reconstruction_activation, reconstruction_weights, hidden_exp); // dynamic matrix tied
01648     //product(reconstruction_activation, reconstruction_weights, hidden_exp); // dynamic matrix not tied
01649     reconstruction_activation += reconstruction_bias;
01650 
01651     for( int j=0 ; j<fullhiddenlength ; j++ )
01652         reconstruction_prob[j] = fastsigmoid( reconstruction_activation[j] );
01653 
01654     //hidden_layer->fprop(reconstruction_activation, reconstruction_prob);
01655 
01656     /********************************************************************************/
01657     hidden_reconstruction_activation_grad.resize(reconstruction_prob.size());
01658     hidden_reconstruction_activation_grad << reconstruction_prob;
01659     hidden_reconstruction_activation_grad -= hidden_target;
01660     hidden_reconstruction_activation_grad *= hidden_reconstruction_cost_weight;
01661     
01662 
01663     productAcc(hidden_gradient, reconstruction_weights, hidden_reconstruction_activation_grad); // dynamic matrix tied
01664     //transposeProductAcc(hidden_gradient, reconstruction_weights, hidden_reconstruction_activation_grad); // dynamic matrix not tied
01665     
01666     //update bias
01667     multiplyAcc(reconstruction_bias, hidden_reconstruction_activation_grad, -lr);
01668     // update weight
01669     externalProductScaleAcc(acc_weights_gr, hidden, hidden_reconstruction_activation_grad, -lr); // dynamic matrix tied
01670     //externalProductScaleAcc(acc_weights_gr, hidden_reconstruction_activation_grad, hidden, -lr); // dynamic matrix not tied
01671                 
01672     //update bias2
01673     //multiplyAcc(reconstruction_bias2, hidden_gradient, -lr);
01674     /********************************************************************************/
01675     // Vec hidden_reconstruction_activation_grad;
01676     /*hidden_reconstruction_activation_grad.clear();
01677     for(int k=0; k<reconstruction_prob.length(); k++){
01678         //    hidden_reconstruction_activation_grad[k] = safelog(1-reconstruction_prob[k]) - safelog(reconstruction_prob[k]);
01679         hidden_reconstruction_activation_grad[k] = - reconstruction_activation[k];
01680         }*/
01681 
01682     double result_cost = 0;
01683     double neg_log_cost = 0; // neg log softmax
01684     for(int k=0; k<reconstruction_prob.length(); k++){
01685         //if(hidden_target[k]!=0)
01686         neg_log_cost -= hidden_target[k]*safelog(reconstruction_prob[k]) + (1-hidden_target[k])*safelog(1-reconstruction_prob[k]);
01687     }
01688     result_cost = neg_log_cost;
01689     
01690     return result_cost;
01691 }
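
// Dataflow sketch for the function above (W_in, W_d, b_h, b_rec are shorthand
// for the input connection weights, tied dynamic weights, hidden-layer bias
// and reconstruction bias used in the code):
//
//     h_noisy = zero_forcing_noise(hidden_target)           // corrupt h_{t-1}
//     h_exp   = sigmoid(W_in * x_t + W_d * h_noisy + b_h)   // re-fprop h_t
//     h_rec   = sigmoid(W_d^T * h_exp + b_rec)              // reconstruct h_{t-1}
//     cost    = cross_entropy(h_rec, hidden_target)
//
// The activation gradient (h_rec - hidden_target), scaled by
// hidden_reconstruction_cost_weight, then steps b_rec, accumulates a weight
// gradient in acc_weights_gr and flows into hidden_gradient.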
01692 
01693 double DenoisingRecurrentNet::fpropHiddenSymmetricDynamicMatrix(Vec hidden, Mat reconstruction_weights, Vec& reconstruction_prob, 
01694                                                                  Vec hidden_target, Vec hidden_gradient, double hidden_reconstruction_cost_weight, double lr)
01695 {
01696     // set appropriate sizes
01697     int fullinputlength = hidden_target.length();
01698     Vec reconstruction_activation;
01699    
01700     reconstruction_activation.resize(fullinputlength);
01701     reconstruction_prob.resize(fullinputlength);
01702 
01703     // predict (denoised) hidden reconstruction
01704     transposeProduct(reconstruction_activation, reconstruction_weights, hidden); // Stan's trick
01705     //product(reconstruction_activation, reconstruction_weights, hidden); 
01706     //reconstruction_activation += hidden_layer->bias;
01707     
01708     hidden_layer->fprop(reconstruction_activation, reconstruction_prob);
01709 
01710     /********************************************************************************/
01711     Vec hidden_reconstruction_activation_grad;
01712     hidden_reconstruction_activation_grad.resize(reconstruction_prob.size());
01713     hidden_reconstruction_activation_grad << reconstruction_prob;
01714     hidden_reconstruction_activation_grad -= hidden_target;
01715     hidden_reconstruction_activation_grad *= hidden_reconstruction_cost_weight;
01716 
01717     productAcc(hidden_gradient, reconstruction_weights, hidden_reconstruction_activation_grad);
01718     /********************************************************************************/
01719 
01720     double result_cost = 0;
01721     double neg_log_cost = 0; // neg log softmax
01722     for(int k=0; k<reconstruction_prob.length(); k++)
01723         if(hidden_target[k]!=0)
01724             neg_log_cost -= hidden_target[k]*safelog(reconstruction_prob[k]);
01725     result_cost = neg_log_cost;
01726     
01727     return result_cost;
01728 }
01729 
01730 /*
01731 input_list
01732 targets_list
01733 masks_list
01734 hidden_list
01735 hidden_act_no_bias_list
01736 hidden2_list
01737 hidden2_act_no_bias_list
01738 target_prediction_list
01739 target_prediction_act_no_bias_list
01740 nll_list
01741 */
01742 /*
01743 void DenoisingRecurrentNet::recurrentUpdate(real input_reconstruction_weight,
01744                                             real hidden_reconstruction_weight,
01745                                             real temporal_gradient_contribution)
01746 {
01747     hidden_temporal_gradient.resize(hidden_layer->size);
01748     hidden_temporal_gradient.clear();
01749     for(int i=hidden_list.length()-1; i>=0; i--){   
01750 
01751         if( hidden_layer2 )
01752             hidden_gradient.resize(hidden_layer2->size);
01753         else
01754             hidden_gradient.resize(hidden_layer->size);
01755         hidden_gradient.clear();
01756         if( prediction_cost_weight!=0 )
01757         {
01758             for( int tar=0; tar<target_layers.length(); tar++)
01759             {
01760                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01761                 {
01762                     target_layers[tar]->activation << target_prediction_act_no_bias_list[tar](i);
01763                     target_layers[tar]->activation += target_layers[tar]->bias;
01764                     target_layers[tar]->setExpectation(target_prediction_list[tar](i));
01765                     target_layers[tar]->bpropNLL(targets_list[tar](i),nll_list(i,tar),bias_gradient);
01766                     bias_gradient *= prediction_cost_weight;
01767                     if(use_target_layers_masks)
01768                         bias_gradient *= masks_list[tar](i);
01769                     target_layers[tar]->update(bias_gradient);
01770                     if( hidden_layer2 )
01771                         target_connections[tar]->bpropUpdate(hidden2_list(i),target_prediction_act_no_bias_list[tar](i),
01772                                                              hidden_gradient, bias_gradient,true);
01773                     else
01774                         target_connections[tar]->bpropUpdate(hidden_list(i),target_prediction_act_no_bias_list[tar](i),
01775                                                              hidden_gradient, bias_gradient,true);
01776                 }
01777             }
01778 
01779             if (hidden_layer2)
01780             {
01781                 hidden_layer2->bpropUpdate(
01782                     hidden2_act_no_bias_list(i), hidden2_list(i),
01783                     bias_gradient, hidden_gradient);
01784                 
01785                 hidden_connections->bpropUpdate(
01786                     hidden_list(i),
01787                     hidden2_act_no_bias_list(i), 
01788                     hidden_gradient, bias_gradient);
01789             }
01790         }
01791             
01792         // Add contribution of input reconstruction cost in hidden_gradient
01793         if(input_reconstruction_weight!=0)
01794         {
01795             Mat reconstruction_weights = getInputConnectionsWeightMatrix();
01796             Vec clean_input = clean_encoded_seq.subMatRows(i, input_window_size).toVec();
01797 
01798             fpropUpdateInputReconstructionFromHidden(hidden_list(i), reconstruction_weights, input_reconstruction_bias, input_reconstruction_prob, 
01799                                                      clean_input, hidden_gradient, hidden_reconstruction_weight, current_learning_rate);
01800         }
01801 
01802 
01803         if(i!=0 && dynamic_connections )
01804         {   
01805 
01806 
01807             hidden_layer->bpropUpdate(
01808                 hidden_act_no_bias_list(i), hidden_list(i),
01809                 hidden_temporal_gradient, hidden_gradient);
01810             input_connections->bpropUpdate(
01811                 input_list[i],
01812                 hidden_act_no_bias_list(i), 
01813                 visi_bias_gradient, hidden_temporal_gradient);// Here, it should be activations - cond_bias, but doesn't matter
01814                 
01815 
01816 
01817             // Add contribution of hidden reconstruction cost in hidden_gradient
01818             if(hidden_reconstruction_weight!=0)
01819             {
01820                 Mat reconstruction_weights = getDynamicConnectionsWeightMatrix();
01821                 // Stan's trick
01822                 fpropHiddenReconstructionFromLastHidden(hidden_list(i-1), reconstruction_weights, hidden_reconstruction_prob, hidden_list(i), hidden_gradient, hidden_reconstruction_weight, current_learning_rate);
01823                 //fpropHiddenReconstructionFromLastHidden(hidden_list(i), reconstruction_weights, hidden_reconstruction_prob, hidden_list(i-1), hidden_gradient, hidden_reconstruction_weight, current_learning_rate);
01824             
01825             }
01826             // add contribution to gradient of next time step hidden layer
01827             if(temporal_gradient_contribution>0)
01828             { // add weighted contribution of hidden_temporal gradient to hidden_gradient
01829                 // It does this: hidden_gradient += temporal_gradient_contribution*hidden_temporal_gradient;
01830                 multiplyAcc(hidden_gradient, hidden_temporal_gradient, temporal_gradient_contribution);
01831             }
01832   
01833 
01834             hidden_layer->bpropUpdate(
01835                 hidden_act_no_bias_list(i), hidden_list(i),
01836                 hidden_temporal_gradient, hidden_gradient);
01837                 
01838             dynamic_connections->bpropUpdate(
01839                 hidden_list(i-1),
01840                 hidden_act_no_bias_list(i), // Here, it should be dynamic_act_no_bias_contribution, but doesn't matter because a RBMMatrixConnection::bpropUpdate doesn't use its second argument
01841                 hidden_gradient, hidden_temporal_gradient);
01842                 
01843             
01844             hidden_temporal_gradient << hidden_gradient;                
01845         }
01846         else
01847         {
01848             hidden_layer->bpropUpdate(
01849                 hidden_act_no_bias_list(i), hidden_list(i),
01850                 hidden_temporal_gradient, hidden_gradient); // Not really temporal gradient, but this is the final iteration...
01851             input_connections->bpropUpdate(
01852                 input_list[i],
01853                 hidden_act_no_bias_list(i), 
01854                 visi_bias_gradient, hidden_temporal_gradient);// Here, it should be activations - cond_bias, but doesn't matter
01855 
01856         }
01857     }
01858     
01859 }
01860 
01861 */
01862 void DenoisingRecurrentNet::recurrentUpdate(real input_reconstruction_weight,
01863                                             real hidden_reconstruction_weight,
01864                                             real temporal_gradient_contribution,
01865                                             real predic_cost_weight,
01866                                             real inputAndDynamicPart,
01867                                             Vec train_costs, 
01868                                             Vec train_n_items )
01869 {
01870     TVec<Mat> targetWeights;
01871     Mat inputWeights;
01872     Mat dynamicWeights;
01873     Mat reconsWeights;
01874     targetWeights.resize(target_connections.length());
01875     for( int tar=0; tar<target_layers.length(); tar++)
01876     {
01877        targetWeights[tar] = getTargetConnectionsWeightMatrix(tar);
01878     }
01879     inputWeights = getInputConnectionsWeightMatrix();
01880     if(dynamic_connections )
01881     { 
01882         dynamicWeights = getDynamicConnectionsWeightMatrix();
01883         reconsWeights = getDynamicReconstructionConnectionsWeightMatrix();
01884     }
01885     acc_target_connections_gr.resize(target_connections.length());
01886     for( int tar=0; tar<target_layers.length(); tar++)
01887     {
01888         acc_target_connections_gr[tar].resize(target_connections[tar]->up_size, target_connections[tar]->down_size);
01889         acc_target_connections_gr[tar].clear();
01890     }
01891     acc_input_connections_gr.resize(input_connections->up_size, input_connections->down_size);
01892     acc_input_connections_gr.clear();
01893     if(dynamic_connections )
01894     { 
01895         acc_dynamic_connections_gr.resize(dynamic_connections->up_size, dynamic_connections->down_size);
01896         acc_dynamic_connections_gr.clear();
01897         acc_reconstruction_dynamic_connections_gr.resize(dynamic_connections->down_size, dynamic_connections->up_size);
01898         acc_reconstruction_dynamic_connections_gr.clear();
01899     }
01900 
01901 
01902     hidden_temporal_gradient.resize(hidden_layer->size);
01903     hidden_temporal_gradient.clear();
01904     for(int i=hidden_list.length()-1; i>=0; i--){   
01905 
01906         if( hidden_layer2 )
01907             hidden_gradient.resize(hidden_layer2->size);
01908         else
01909             hidden_gradient.resize(hidden_layer->size);
01910         hidden_gradient.clear();
01911         if( predic_cost_weight!=0 )
01912         {
01913             for( int tar=0; tar<target_layers.length(); tar++)
01914             {
01915                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
01916                 {
01917                     target_layers[tar]->activation << target_prediction_act_no_bias_list[tar](i);
01918                     target_layers[tar]->activation += target_layers[tar]->bias;
01919                     target_layers[tar]->setExpectation(target_prediction_list[tar](i));
01920                     target_layers[tar]->bpropNLL(targets_list[tar](i),nll_list(i,tar),bias_gradient);
01921                     bias_gradient *= predic_cost_weight;
01922                     if(use_target_layers_masks)
01923                         bias_gradient *= masks_list[tar](i);
01924                     //target_layers[tar]->update(bias_gradient);
01925                     updateTargetLayer( bias_gradient, 
01926                                        target_layers[tar]->bias, 
01927                                        target_layers[tar]->learning_rate );
01928                     //Mat targetWeights = getTargetConnectionsWeightMatrix(tar);
01929                     if( hidden_layer2 ){
01930                         //target_connections[tar]->bpropUpdate(hidden2_list(i),target_prediction_act_no_bias_list[tar](i),hidden_gradient, bias_gradient,true);
01931                         bpropUpdateConnection(hidden2_list(i),
01932                                               target_prediction_act_no_bias_list[tar](i),
01933                                               hidden_gradient, 
01934                                               bias_gradient,
01935                                               targetWeights[tar],
01936                                               acc_target_connections_gr[tar],
01937                                               target_connections[tar]->down_size,
01938                                               target_connections[tar]->up_size,
01939                                               target_connections[tar]->learning_rate,
01940                                               true,
01941                                               false);
01942                     }
01943                     else{
01944                         //target_connections[tar]->bpropUpdate(hidden_list(i),target_prediction_act_no_bias_list[tar](i),hidden_gradient, bias_gradient,true);
01945                         bpropUpdateConnection(hidden_list(i),
01946                                               target_prediction_act_no_bias_list[tar](i),
01947                                               hidden_gradient, 
01948                                               bias_gradient,
01949                                               targetWeights[tar],
01950                                               acc_target_connections_gr[tar],
01951                                               target_connections[tar]->down_size,
01952                                               target_connections[tar]->up_size,
01953                                               target_connections[tar]->learning_rate,
01954                                               true,
01955                                               false);
01956                     }
01957                 }
01958             }
01959 
01960             if (hidden_layer2)
01961             {
01962                 hidden_layer2->bpropUpdate(
01963                     hidden2_act_no_bias_list(i), hidden2_list(i),
01964                     bias_gradient, hidden_gradient);
01965                 
01966                 hidden_connections->bpropUpdate(
01967                     hidden_list(i),
01968                     hidden2_act_no_bias_list(i), 
01969                     hidden_gradient, bias_gradient);
01970             }
01971         }
01972 
01973         if(inputAndDynamicPart){   
01974             // Add contribution of input reconstruction cost in hidden_gradient
01975             if(input_reconstruction_weight!=0)
01976             {
01977                 //Mat reconstruction_weights = getInputConnectionsWeightMatrix();
01978                 //Vec clean_input = clean_encoded_seq.subMatRows(i, input_window_size).toVec();
01979                 
01980                 train_costs[train_costs.length()-2] += fpropUpdateInputReconstructionFromHidden(hidden_list(i), inputWeights, acc_input_connections_gr, input_reconstruction_bias, input_reconstruction_prob, 
01981                                                                            input_list[i], hidden_gradient, input_reconstruction_weight, current_learning_rate);
01982                 train_n_items[train_costs.length()-2]++;
01983             }
01984             
01985             //if(i!=0 && dynamic_connections )
01986             if(i>1 && dynamic_connections )
01987             {   
01988                 
01989                 // Add contribution of hidden reconstruction cost in hidden_gradient
01990                 Vec hidden_reconstruction_activation_grad;
01991                 hidden_reconstruction_activation_grad.resize(hidden_layer->size);
01992                 //Mat reconstruction_weights = getDynamicConnectionsWeightMatrix();
01993                 if(hidden_reconstruction_weight!=0)
01994                 {
01995                     //Vec hidden_reconstruction_activation_grad;
01996                     //Mat reconstruction_weights = getDynamicConnectionsWeightMatrix();
01997                     
01998                     // Stan's trick
01999                     //fpropHiddenSymmetricDynamicMatrix(hidden_list(i-1), reconstruction_weights, hidden_reconstruction_prob, hidden_list(i), hidden_gradient, hidden_reconstruction_weight, current_learning_rate);
02000                     
02001                     train_costs[train_costs.length()-1] += fpropHiddenReconstructionFromLastHidden(input_list[i], 
02002                                                                                                    hidden_list(i), 
02003                                                                                                    dynamicWeights, //reconsWeights, //dynamicWeights, 
02004                                                                                                    acc_dynamic_connections_gr, //acc_reconstruction_dynamic_connections_gr, //acc_dynamic_connections_gr, 
02005                                                                                                    hidden_reconstruction_bias, 
02006                                                                                                    hidden_reconstruction_bias2, 
02007                                                                                                    hidden_reconstruction_activation_grad, 
02008                                                                                                    hidden_reconstruction_prob, 
02009                                                                                                    hidden_list(i-1), 
02010                                                                                                    hidden_gradient, 
02011                                                                                                    hidden_reconstruction_weight, 
02012                                                                                                    current_learning_rate);
02013                     
02014                     
02015                     /*
02016                     train_costs[train_costs.length()-1] += fpropHiddenReconstructionFromLastHidden2(input_list[i], 
02017                                                                                                    hidden_list(i), 
02018                                                                                                    dynamicWeights, //reconsWeights, //dynamicWeights, 
02019                                                                                                    acc_dynamic_connections_gr, //acc_reconstruction_dynamic_connections_gr, //acc_dynamic_connections_gr, 
02020                                                                                                    hidden_reconstruction_bias, 
02021                                                                                                    hidden_reconstruction_bias2, 
02022                                                                                                    hidden_reconstruction_activation_grad, 
02023                                                                                                    hidden_reconstruction_prob, 
02024                                                                                                    hidden_list(i-2), 
02025                                                                                                    hidden_gradient, 
02026                                                                                                    hidden_reconstruction_weight, 
02027                                                                                                    current_learning_rate);
02028                     */
02029 
02030                     //fpropHiddenReconstructionFromLastHidden(hidden_list(i), reconsWeights, acc_reconstruction_dynamic_connections_gr, hidden_reconstruction_bias, hidden_reconstruction_activation_grad, hidden_reconstruction_prob, hidden_list(i-1), hidden_gradient, hidden_reconstruction_weight, current_learning_rate);
02031                     train_n_items[train_costs.length()-1]++;
02032                 }
02033                 
02034                 
02035                 // add contribution to gradient of next time step hidden layer
02036                 if(temporal_gradient_contribution>0)
02037                 { // add weighted contribution of hidden_temporal gradient to hidden_gradient
02038                     // It does this: hidden_gradient += temporal_gradient_contribution*hidden_temporal_gradient;
02039                     multiplyAcc(hidden_gradient, hidden_temporal_gradient, temporal_gradient_contribution);
02040                     
02041                 }
02042 
02047                 bpropUpdateHiddenLayer(hidden_act_no_bias_list(i), 
02048                                        hidden_list(i),
02049                                        hidden_temporal_gradient, 
02050                                        hidden_gradient,
02051                                        hidden_layer->bias, 
02052                                        hidden_layer->learning_rate );
02053                 
02054                 
02055                 //input
02056                 //if(hidden_reconstruction_weight==0)
02057                 //{
02058                    
02059                     
02060                 bpropUpdateConnection(input_list[i],
02061                                       hidden_act_no_bias_list(i), 
02062                                       visi_bias_gradient, 
02063                                       hidden_temporal_gradient,// Here, it should be activations - cond_bias, but doesn't matter
02064                                       inputWeights,
02065                                       acc_input_connections_gr,
02066                                       input_connections->down_size,
02067                                       input_connections->up_size,
02068                                       input_connections->learning_rate,
02069                                       false,
02070                                       true);
02071                     //}
02072                 
02073                 //Dynamic
02074                 //if(input_reconstruction_weight==0)
02075                 //{
02076                     /*bpropUpdateHiddenLayer(hidden_act_no_bias_list(i), 
02077                                        hidden_list(i),
02078                                        hidden_temporal_gradient, 
02079                                        hidden_gradient,
02080                                        hidden_layer->bias, 
02081                                        hidden_layer->learning_rate );*/
02082 
02083                 bpropUpdateConnection(hidden_list(i-1),
02084                                       hidden_act_no_bias_list(i), // Here, it should be dynamic_act_no_bias_contribution, but doesn't matter because a RBMMatrixConnection::bpropUpdate doesn't use its second argument
02085                                       hidden_gradient, 
02086                                       hidden_temporal_gradient, 
02087                                       dynamicWeights,
02088                                       acc_dynamic_connections_gr,
02089                                       dynamic_connections->down_size,
02090                                       dynamic_connections->up_size,
02091                                       dynamic_connections->learning_rate,
02092                                       false,
02093                                       false);
02094                     //}
02095                 
02096                 hidden_temporal_gradient << hidden_gradient; 
02097                 //if(hidden_reconstruction_weight!=0)
02098                 //    hidden_temporal_gradient +=  hidden_reconstruction_activation_grad;
02099             }
02100             else
02101             {
02102                 if(input_reconstruction_weight==0)
02103                 {
02104                     bpropUpdateHiddenLayer(hidden_act_no_bias_list(i), 
02105                                            hidden_list(i),
02106                                            hidden_temporal_gradient, // Not really temporal gradient, but this is the final iteration...
02107                                            hidden_gradient,
02108                                            hidden_layer->bias, 
02109                                            hidden_layer->learning_rate );
02110                     
02111                     //input
02112                     bpropUpdateConnection(input_list[i],
02113                                           hidden_act_no_bias_list(i), 
02114                                           visi_bias_gradient, 
02115                                           hidden_temporal_gradient,// Here, it should be activations - cond_bias, but doesn't matter
02116                                           inputWeights,
02117                                           acc_input_connections_gr,
02118                                           input_connections->down_size,
02119                                           input_connections->up_size,
02120                                           input_connections->learning_rate,
02121                                           false,
02122                                           true);
02123                 }
02124             }
02125         }
02126     }
02127     
02128     
02129     // fold accumulated gradients into the connection weight matrices
02130     for( int tar=0; tar<target_layers.length(); tar++)
02131     {
02132         multiplyAcc(targetWeights[tar], acc_target_connections_gr[tar], 1);
02133     }
02134     multiplyAcc(inputWeights, acc_input_connections_gr, 1);
02135     
02136     if(dynamic_connections )
02137     {
02138         multiplyAcc(dynamicWeights, acc_dynamic_connections_gr, 1);
02139         //multiplyAcc(reconsWeights, acc_reconstruction_dynamic_connections_gr, 1);
02140     }
02141     
02142     
02143 
02144 
02145      /* int r;
02146     int modulo;
02147     if(input_reconstruction_weight!=0)
02148         modulo = 2;
02149     else
02150         modulo=3;
02151     
02152     r = rand() % modulo +1;
02153    
02154    
02155     if(r==1)
02156     {
02157         multiplyAcc(inputWeights, acc_input_connections_gr, 1);
02158     }
02159     else if (r==2){
02160         if(dynamic_connections )
02161         {
02162             multiplyAcc(dynamicWeights, acc_dynamic_connections_gr, 1);
02163             //multiplyAcc(reconsWeights, acc_reconstruction_dynamic_connections_gr, 1);
02164         }
02165     }
02166     else {
02167         // update the connection weight matrices
02168         for( int tar=0; tar<target_layers.length(); tar++)
02169         {
02170             multiplyAcc(targetWeights[tar], acc_target_connections_gr[tar], 1);
02171         }
02172         }*/
02173 }
02174 
02175 
02176 /* TO DO:
02177 verify the number of time steps
02178 correctly implement duration_to_number_of_timeframes(duration)
02179 declare the new options with correct default values
02180 */
02181 
02182 
02183 /*
02184   
02185 Frequencies in the trainset:
02186 
02187 **NOTES**
02188 0  DO            0.0872678308077029924            
02189 1  DO#           0.00716010857716887095                   
02190 2  RE            0.178895847137025221                   
02191 3  RE#           0.0037189817684399511                   
02192 4  MI            0.114241135358112283                   
02193 5  FA            0.00517237694231303512                   
02194 6  FA#           0.0806848056083954851                   
02195 7  SOL           0.194776326757432616                   
02196 8  SOL#          0.00301365763994271892                   
02197 9  LA            0.13988928548528437                   
02198 10 LA#           0.00369760831000064084                   
02199 11 SI            0.181482035608181741                   
02200 
02201 **OCTAVES**
02202 0  OCT1          0.362130506337230429                   
02203 1  OCT2          0.574048346762989659                   
02204 2  OCT3          0.0635219184816295107                   
02205 3  OCT4          0.000299228418150340866                
02206 
02207 **DURATIONS**
02208 0  1/8           0.00333425951653236984                   
02209 1  1/6           0.000170987667514480506                   
02210 2  1/4           0.0386432128582725951                   
02211 3  1/3           0.00716010857716887095                   
02212 4  2/4           0.569880522367324227                   
02213 5  2/3           0                               
02214 6  3/4           0.00220146621924893673                   
02215 7  4/4           0.305896937183405604                   
02216 8  5/4           4.27469168786201266e-05                   
02217 9  6/4           0.0222283967768824656                   
02218 10 8/4           0.0365058670143415878                   
02219 11 10/4          0.000876311796011712552                   
02220 12 12/4          0.0078440592472267933                   
02221 13 14/4          6.41203753179301933e-05                   
02222 14 16/4          0.00331288605809306001                   
02223 15 18/4          8.54938337572402532e-05                   
02224 16 20/4          0.000726697586936542119                   
02225 17 24/4          0.000619830294739991887                   
02226 18 28/4          0.000149614209075170433                   
02227 19 32/4          0.000256481501271720773         
02228 
02229  */
02230 
02231 
02232 /*
02233 Data format:
02234 
02235 matrix with 2 columns:
02236 note, duration
02237 
02238 note: midi_number (21..108, piano key number)
02239       or 0  (silence)
02240       or -1 (missing)
02241       or -999 (end of sequence)
02242 
02243 duration: see the indices (left column) under DURATIONS in the frequency table above;
02244           1 unit corresponds to a quarter note.
02245 
02246  */
02247 
02248 void DenoisingRecurrentNet::encodeSequence(Mat sequence, Mat& encoded_seq) const
02249 {
02251     int prepend_zero_rows = input_window_size;
02252 
02253     // reserve some minimum space for encoded_seq
02254     encoded_seq.resize(5000, 4);
02255 
02256     if(encoding=="timeframe")
02257         encode_onehot_timeframe(sequence, encoded_seq, prepend_zero_rows);
02258     else if(encoding=="note_duration")
02259         encode_onehot_note_octav_duration(sequence, encoded_seq, prepend_zero_rows, false, 0);
02260     else if(encoding=="note_octav_duration")
02261         encode_onehot_note_octav_duration(sequence, encoded_seq, prepend_zero_rows, false, 4);    
02262     else if(encoding=="diffNote_duration")
02263         encode_onehot_diffNote_duration(sequence, encoded_seq, false);
02264     else if(encoding=="raw_masked_supervised")
02265         PLERROR("raw_masked_supervised means already encoded! You shouldn't have landed here!");
02266     else if(encoding=="generic")
02267         PLERROR("generic means already encoded! You shouldn't have landed here!");
02268     else
02269         PLERROR("unsupported encoding: %s",encoding.c_str());
02270 }
02271 
02272 
02273 void DenoisingRecurrentNet::getSequence(int i, Mat& seq) const
02274 { 
02275     int start = 0;
02276     if(i>0)
02277         start = trainset_boundaries[i-1]+1;
02278     int end = trainset_boundaries[i];
02279     int w = train_set->width();
02280     seq.resize(end-start, w);
02281     train_set->getMat(start,0,seq);
02282 }
02283 
02284 
02285 void DenoisingRecurrentNet::setTrainingSet(VMat training_set, bool call_forget)
02286 {
02287     inherited::setTrainingSet(training_set, call_forget);
02288     locateSequenceBoundaries(training_set, trainset_boundaries, end_of_sequence_symbol);
02289 }
02290 
02291 
02292 void DenoisingRecurrentNet::locateSequenceBoundaries(VMat dataset, TVec<int>& boundaries, real end_of_sequence_symbol)
02293 {
02294     boundaries.resize(10000);
02295     boundaries.resize(0);
02296     int l = dataset->length();
02297     for(int i=0; i<l; i++)
02298     {
02299         if(dataset(i,0)==end_of_sequence_symbol)
02300             boundaries.append(i);
02301     }
02302 }
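
// Usage sketch: separator rows carry end_of_sequence_symbol in their first
// column (-999 in the raw data format documented above), so after
//
//     TVec<int> boundaries;
//     locateSequenceBoundaries(train_set, boundaries, end_of_sequence_symbol);
//
// boundaries[i] is the row index of the i-th separator, and getSequence()
// reads sequence i from rows [boundaries[i-1]+1, boundaries[i]).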
02303 
02304 
02305 int DenoisingRecurrentNet::getDurationBit(int duration)
02306 {
02307     if(duration==5)  // map infrequent 5 to 4
02308         duration=4;
02309     return duration;
02310 }
02311 
02312 
02313 // encodings
02314 
02315 
02316 /*
02317   use note_nbits=13 bits for note + octav_nbits bits for octav + duration_nbits bits for duration
02318   bit positions are numbered starting at 0.
02319 
02320   if note is a silence (midi_number==0) then bit at position 12 is on
02321   otherwise bit at position midi_number%12 is on
02322 
02323   To compute the octav bit position, we first compute the min and max of midi_number/12;
02324   this gives us octav_min.
02325   Then the bit at position note_nbits+(midi_number/12)-octav_min is switched on.
02326 
02327   bit at position note_nbits+octav_nbits+duration is on
02328  */
02329 
02330 void DenoisingRecurrentNet::encode_onehot_diffNote_duration(Mat sequence, Mat& encoded_sequence,
02331                                                               bool use_silence,  int duration_nbits)
02332 {
02333     int l = sequence.length();
02334     // possible diffs: -21 ... -1 0 1 ... 21
02335     // index:            0     20 21 22    42
02336     int note_nbits = 43; // 43 bits, for diffs from -21 to 21
02337 
02338     encoded_sequence.resize(l,note_nbits+duration_nbits);
02339     encoded_sequence.clear();
02340     
02341     
02342     for(int i=0; i<l; i++)
02343     {
02344         //int midi_number = int(sequence(i,0));
02345 
02346         if(i==0) // first frame: no previous note, so encode a diff of 0
02347         {
02348             encoded_sequence(i,21) = 1;
02349         }
02350         else{
02351             int diffNote = int(sequence(i,0))-int(sequence(i-1,0))+21;
02352             encoded_sequence(i,diffNote) = 1;
02353         }
02354 
02355        
02356         int duration_bit = getDurationBit(int(sequence(i,1)));
02357         if(duration_bit<0 || duration_bit>=duration_nbits)
02358             PLERROR("duration_bit out of valid range");
02359         encoded_sequence(i,note_nbits+duration_bit) = 1;
02360     }
02361 }
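
// Worked example (assuming duration_nbits=20 as in the duration table above):
// for a sequence starting with midi notes 60 then 64, both with duration
// index 2, frame 0 sets bit 21 (a diff of 0 by convention for the first
// frame) and frame 1 sets bit (64-60)+21 = 25; both frames also set duration
// bit 43+2 = 45.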
02362 
02363 void DenoisingRecurrentNet::encode_onehot_note_octav_duration(Mat sequence, Mat& encoded_sequence, int prepend_zero_rows,
02364                                                               bool use_silence, int octav_nbits, int duration_nbits)
02365 {
02366     int l = sequence.length();
02367     int note_nbits = use_silence ?13 :12;
02368 
02369     encoded_sequence.resize(prepend_zero_rows+l,note_nbits+octav_nbits+duration_nbits);
02370     encoded_sequence.clear();
02371     int octav_min = 10000;
02372     int octav_max = -10000;
02373 
02374     if(octav_nbits>0)
02375     {
02376         for(int i=0; i<l; i++)
02377         {
02378             int midi_number = int(sequence(i,0));
02379             int octav = midi_number/12;
02380             if(octav<octav_min)
02381                 octav_min = octav;
02382             if(octav>octav_max)
02383                 octav_max = octav;
02384         }
02385         if(octav_max-octav_min > octav_nbits)
02386             PLERROR("Octav range too big. Does not fit in octav_nbits");
02387     }
02388 
02389     
02390     for(int i=0; i<l; i++)
02391     {
02392         int midi_number = int(sequence(i,0));
02393         if(midi_number==0) // silence
02394         {
02395             if(use_silence)
02396                 encoded_sequence(prepend_zero_rows+i,12) = 1;
02397         }
02398         else
02399             encoded_sequence(prepend_zero_rows+i,midi_number%12) = 1;
02400 
02401         if(octav_nbits>0)
02402         {
02403             int octavpos = midi_number/12-octav_min;
02404             encoded_sequence(prepend_zero_rows+i,note_nbits+octavpos) = 1;
02405         }
02406 
02407         int duration_bit = getDurationBit(int(sequence(i,1)));
02408         if(duration_bit<0 || duration_bit>=duration_nbits)
02409             PLERROR("duration_bit out of valid range");
02410         encoded_sequence(prepend_zero_rows+i,note_nbits+octav_nbits+duration_bit) = 1;
02411     }
02412 }
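// A worked example of the layout above (all values assumed for illustration):
// with use_silence=true (note_nbits=13), octav_nbits=2 and duration_nbits=5,
// a row {midi=60, duration=3} in a sequence whose lowest octave is 60/12=5
// (so octav_min=5) is encoded in a 13+2+5 = 20 bit vector with
//     bit 60%12 = 0      on (pitch class C),
//     bit 13+(5-5) = 13  on (octave offset 0),
//     bit 13+2+3 = 18    on (duration 3),
// and every other bit off.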
02413 
02414 
02415 int DenoisingRecurrentNet::duration_to_number_of_timeframes(int duration)
02416 {
02417     PLERROR("duration_to_number_of_timeframes (used only when encoding==timeframe) is not yet implemented");
02418     return duration+1; // not reached: PLERROR throws; placeholder return to silence the compiler
02419 }
02420 
02421 /*
02422   Uses note_nbits+1 bits for the note at every timeframe;
02423   the last bit indicates continuation of the preceding note.
02424  */
02425 
02426 void DenoisingRecurrentNet::encode_onehot_timeframe(Mat sequence, Mat& encoded_sequence, 
02427                                                     int prepend_zero_rows, bool use_silence)
02428 {
02429     int l = sequence.length();
02430     int newl = 0;
02431 
02432     // First compute length of timeframe sequence
02433     for(int i=0; i<l; i++)
02434     {
02435         int duration = int(sequence(i,1));
02436         newl += duration_to_number_of_timeframes(duration);
02437     }
02438 
02439     int nnotes = use_silence ? 13 : 12;
02440 
02441     // reserve one extra bit to mean repetition
02442     encoded_sequence.resize(prepend_zero_rows+newl, nnotes+1);
02443     encoded_sequence.clear();
02444 
02445     int k=prepend_zero_rows;
02446     for(int i=0; i<l; i++)
02447     {
02448         int midi_number = int(sequence(i,0));
02449         if(midi_number==0) // silence
02450         {
02451             if(use_silence)
02452                 encoded_sequence(k++,12) = 1;
02453         }
02454         else
02455             encoded_sequence(k++,midi_number%12) = 1;
02456 
02457         int duration = int(sequence(i,1));
02458         int nframes = duration_to_number_of_timeframes(duration);
02459         while(--nframes>0) // set the repetition bit
02460             encoded_sequence(k++,nnotes) = 1;            
02461     }    
02462 }
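// Illustrative expansion (assuming duration_to_number_of_timeframes is
// implemented and maps this duration to 3 frames): a row {midi=60, duration=d}
// with use_silence=true (nnotes=13) becomes 3 rows of width 14:
//     frame 0:     bit 60%12 = 0 on (the note itself)
//     frames 1, 2: bit 13 on (the extra "repetition" bit, continuing the note)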
02463 
02464 
02465 // input noise injection
02466 void DenoisingRecurrentNet::inject_zero_forcing_noise(Mat sequence, double noise_prob) const
02467 {
02468     if(!sequence.isCompact())
02469         PLERROR("Expected a compact sequence");
02470     real* p = sequence.data();
02471     int n = sequence.size();
02472     while(n--)
02473     {
02474         if(*p!=real(0.) && random_gen->uniform_sample()<noise_prob)
02475             *p = real(0.);
02476         ++p;
02477     }
02478 }
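// A minimal sketch of how this corruption is meant to be used for denoising
// training (the 0.3 noise level and the variable names are assumptions):
//
//     Mat corrupted = encoded_seq.copy();        // keep the clean encoding as the target
//     net->inject_zero_forcing_noise(corrupted, 0.3);
//     // each nonzero entry of 'corrupted' was independently reset to 0 with
//     // probability 0.3, so on average 70% of the active bits survive;
//     // 'corrupted' is the input and 'encoded_seq' the reconstruction target.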
02479 
02480 // input noise injection
02481 void DenoisingRecurrentNet::inject_zero_forcing_noise(Vec sequence, double noise_prob) const
02482 {
02483     
02484     real* p = sequence.data();
02485     int n = sequence.size();
02486     while(n--)
02487     {
02488         if(*p!=real(0.) && random_gen->uniform_sample()<noise_prob)
02489             *p = real(0.);
02490         ++p;
02491     }
02492 }
02493 
02494 void DenoisingRecurrentNet::clamp_units(const Vec layer_vector,
02495                                              PP<RBMLayer> layer,
02496                                              TVec<int> symbol_sizes) const
02497 {
02498     int it = 0;
02499     int ss = -1;
02500     for(int i=0; i<layer_vector.length(); i++)
02501     {
02502         ss = symbol_sizes[i];
02503         // If input is a real ...
02504         if(ss < 0) 
02505         {
02506             layer->expectation[it++] = layer_vector[i];
02507         }
02508         else // ... or a symbol
02509         {
02510             // Convert to one-hot vector
02511             layer->expectation.subVec(it,ss).clear();
02512             layer->expectation[it+(int)layer_vector[i]] = 1;
02513             it += ss;
02514         }
02515     }
02516     layer->setExpectation( layer->expectation );
02517 }
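// Illustrative reading of symbol_sizes (values assumed for the example):
// with symbol_sizes = [-1, 3] and layer_vector = [0.7, 2], the loop above
// leaves layer->expectation = [0.7, 0, 0, 1]: a negative size means "real
// value, copy it through", and a positive size ss means "symbol, expand it
// into a one-hot subvector of width ss".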
02518 
02519 void DenoisingRecurrentNet::clamp_units(const Vec layer_vector,
02520                                              PP<RBMLayer> layer,
02521                                              TVec<int> symbol_sizes,
02522                                              const Vec original_mask,
02523                                              Vec& formated_mask) const
02524 {
02525     int it = 0;
02526     int ss = -1;
02527     PLASSERT( original_mask.length() == layer_vector.length() );
02528     formated_mask.resize(layer->size);
02529     for(int i=0; i<layer_vector.length(); i++)
02530     {
02531         ss = symbol_sizes[i];
02532         // If input is a real ...
02533         if(ss < 0) 
02534         {
02535             formated_mask[it] = original_mask[i];
02536             layer->expectation[it++] = layer_vector[i];
02537         }
02538         else // ... or a symbol
02539         {
02540             // Convert to one-hot vector
02541             layer->expectation.subVec(it,ss).clear();
02542             formated_mask.subVec(it,ss).fill(original_mask[i]);
02543             layer->expectation[it+(int)layer_vector[i]] = 1;
02544             it += ss;
02545         }
02546     }
02547     layer->setExpectation( layer->expectation );
02548 }
02549 
02550 void DenoisingRecurrentNet::setLearningRate( real the_learning_rate )
02551 {
02552     current_learning_rate = the_learning_rate;
02553     input_layer->setLearningRate( the_learning_rate );
02554     hidden_layer->setLearningRate( the_learning_rate );
02555     input_connections->setLearningRate( the_learning_rate );
02556     if( dynamic_connections ){
02557         //dynamic_connections->setLearningRate( dynamic_gradient_scale_factor*the_learning_rate ); 
02558         dynamic_connections->setLearningRate( the_learning_rate ); 
02559     }
02560     if( dynamic_reconstruction_connections ){
02561         //dynamic_reconstruction_connections->setLearningRate( dynamic_gradient_scale_factor*the_learning_rate ); 
02562         dynamic_reconstruction_connections->setLearningRate( the_learning_rate ); 
02563     }
02564     if( hidden_layer2 )
02565     {
02566         hidden_layer2->setLearningRate( the_learning_rate );
02567         hidden_connections->setLearningRate( the_learning_rate );
02568     }
02569 
02570     for( int i=0; i<target_layers.length(); i++ )
02571     {
02572         target_layers[i]->setLearningRate( the_learning_rate );
02573         target_connections[i]->setLearningRate( the_learning_rate );
02574     }
02575 }
02576 
02577 
02578 void DenoisingRecurrentNet::computeOutput(const Vec& input, Vec& output) const
02579 {
02580     PLERROR("DenoisingRecurrentNet::computeOutput(): this is a dynamic, "
02581             "generative model, that can only compute negative log-likelihood "
02582             "costs for a whole VMat");
02583 }
02584 
02585 void DenoisingRecurrentNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
02586                                            const Vec& target, Vec& costs) const
02587 {
02588     PLERROR("DenoisingRecurrentNet::computeCostsFromOutputs(): this is a "
02589             "dynamic, generative model, that can only compute negative "
02590             "log-likelihooh costs for a whole VMat");
02591 }
02592 
02593 
02594 
02595 void DenoisingRecurrentNet::test(VMat testset, PP<VecStatsCollector> test_stats,
02596                   VMat testoutputs, VMat testcosts)const
02597 {
02598     int len = testset.length();
02599 
02600     Vec output(outputsize());
02601     output.clear();
02602 
02603     Vec costs(nTestCosts());
02604     costs.clear();
02605     Vec n_items(nTestCosts());
02606     n_items.clear();
02607 
02608     PP<ProgressBar> pb;
02609     if (report_progress) 
02610         pb = new ProgressBar("Testing learner", len);
02611 
02612     if (len == 0) {
02613         // Empty test set: we give -1 cost arbitrarily.
02614         costs.fill(-1);
02615         test_stats->update(costs);
02616         return; // nothing to test; also avoids dividing by the zero counts below
02617     }
02617 
02618     int w = testset->width();
02619     locateSequenceBoundaries(testset, testset_boundaries, end_of_sequence_symbol);
02620     int nseq = testset_boundaries.length();
02621 
02622     seq.resize(5000,2); // contains the current sequence
02623     encoded_seq.resize(5000, 4);
02624 
02625 
02626     int pos = 0; // position in testoutputs
02627     for(int i=0; i<nseq; i++)
02628     {
02629         int start = 0;
02630         if(i>0)
02631             start = testset_boundaries[i-1]+1;
02632         int end = testset_boundaries[i];
02633         int seqlen = end-start; // target_prediction_list[0].length();
02634         seq.resize(seqlen, w);
02635         testset->getMat(start,0,seq);
02636         encodeSequenceAndPopulateLists(seq, false);
02637 
02638         if(input_window_size==0)
02639             unconditionalFprop(costs, n_items);
02640         else
02641             recurrentFprop(costs, n_items);
02642 
02643         if (testoutputs)
02644         {
02645             for(int t=0; t<seqlen; t++)
02646             {
02647                 int sum_target_layers_size = 0;
02648                 for( int tar=0; tar < target_layers.length(); tar++ )
02649                 {
02650                     if( !fast_exact_is_equal(target_layers_weights[tar],0) )
02651                     {
02652                         output.subVec(sum_target_layers_size,target_layers[tar]->size)
02653                             << target_prediction_list[tar](t);
02654                     }
02655                     sum_target_layers_size += target_layers[tar]->size;
02656                 }
02657                 testoutputs->putOrAppendRow(pos++, output);
02658             }
02659             output.fill(end_of_sequence_symbol);
02660             testoutputs->putOrAppendRow(pos++, output);
02661         }
02662         else
02663             pos += seqlen;
02664 
02665         if (report_progress)
02666             pb->update(pos);
02667     }
02668 
02669     for(int i=0; i<costs.length(); i++)
02670     {
02671         if( !fast_exact_is_equal(target_layers_weights[i],0) )
02672             costs[i] /= n_items[i];
02673         else
02674             costs[i] = MISSING_VALUE;
02675     }
02676     if (testcosts)
02677         testcosts->putOrAppendRow(0, costs);
02678     
02679     if (test_stats)
02680         test_stats->update(costs, 1.);
02681 }
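// Note on the reported costs: for each target layer tar with nonzero weight,
// the value written out above is the average per-frame negative log-likelihood,
// costs[tar] = (sum over frames t of NLL_t) / n_items[tar]; layers with a
// zero weight report MISSING_VALUE instead.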
02682 
02683 
02684 TVec<string> DenoisingRecurrentNet::getTestCostNames() const
02685 {
02686     TVec<string> cost_names(0);
02687     for( int i=0; i<target_layers.length(); i++ )
02688         cost_names.append("target" + tostring(i) + ".NLL");
02689     return cost_names;
02690 }
02691 
02692 TVec<string> DenoisingRecurrentNet::getTrainCostNames() const
02693 {
02694     return getTestCostNames();
02695 }
02696 
02697 /*
02698 void DenoisingRecurrentNet::generate(int t, int n)
02699 {
02700     PLERROR("generate not yet implemented");
02701 }
02702 */
02703 
02704 
02705 void DenoisingRecurrentNet::generate(int t, int n)
02706 {
02707     //PPath* the_filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/scoreGen.amat";
02708     data = new AutoVMatrix();
02709     //data->filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/listData/target_tm12_input_t_tm12_tp12/scoreGen_tar_tm12__in_tm12_tp12.amat";
02710     //data->filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/create_data/scoreGenSuitePerf.amat";
02711     data->filename = "/home/stan/cvs/Gamme/expressive_data/dataGen.amat";
02712 
02713     data->defineSizes(163,16,0);
02714     //data->inputsize = 21;
02715     //data->targetsize = 0;
02716     //data->weightsize = 0;
02717     data->build();
02718 
02724     int len = data->length();
02725     int tarSize = outputsize();
02726     int partTarSize;
02727     Vec input;
02728     Vec target;
02729     real weight;
02730     int targsize;
02731 
02732     Vec output(outputsize());
02733     output.clear();
02734 //     Vec costs(nTestCosts());
02735 //     costs.clear();
02736 //     Vec n_items(nTestCosts());
02737 //     n_items.clear();
02738 
02740     use_target_layers_masks = true;
02741 
02742     int ith_sample_in_sequence = 0;
02743     int inputsize_without_masks = inputsize() 
02744         - ( use_target_layers_masks ? targetsize() : 0 );
02745     int sum_target_elements = 0;
02746     for (int i = 0; i < len; i++)
02747     {
02748         data->getExample(i, input, target, weight);
02749         if(i>n)
02750         {
02751             for (int k = 1; k <= t; k++)
02752             {
02753                 if(k<=i){
02754                     partTarSize = outputsize();
02755                     for( int tar=0; tar < target_layers.length(); tar++ )
02756                     {
02757                         
02758                         input.subVec(inputsize_without_masks-(tarSize*(t-k))-partTarSize-1,target_layers[tar]->size) << target_prediction_list[tar](ith_sample_in_sequence-k);
02759                         partTarSize -= target_layers[tar]->size;
02760                         
02761                         
02762                     }
02763                 }
02764             }       
02765         }
02766     
02767 
02768 //         for (int k = 1; k <= t; k++)
02769 //         {
02770 //             partTarSize = outputsize();
02771 //             for( int tar=0; tar < target_layers.length(); tar++ )
02772 //             {
02773 //                 if(i>=t){
02774 //                     input.subVec(inputsize_without_masks-(tarSize*(t-k))-partTarSize-1,target_layers[tar]->size) << target_prediction_list[tar](ith_sample_in_sequence-k);
02775 //                     partTarSize -= target_layers[tar]->size;
02776 //                 }
02777 //             }
02778 //         }
02779 
02780         if( fast_exact_is_equal(input[0],end_of_sequence_symbol) )
02781         {
02782 //             ith_sample_in_sequence = 0;
02783 //             hidden_list.resize(0);
02784 //             hidden_act_no_bias_list.resize(0);
02785 //             hidden2_list.resize(0);
02786 //             hidden2_act_no_bias_list.resize(0);
02787 //             target_prediction_list.resize(0);
02788 //             target_prediction_act_no_bias_list.resize(0);
02789 //             input_list.resize(0);
02790 //             targets_list.resize(0);
02791 //             nll_list.resize(0,0);
02792 //             masks_list.resize(0);
02793 
02794             
02795 
02796             continue;
02797         }
02798 
02799         // Resize internal variables
02800         hidden_list.resize(ith_sample_in_sequence+1, hidden_layer->size);
02801         hidden_act_no_bias_list.resize(ith_sample_in_sequence+1, hidden_layer->size);
02802         if( hidden_layer2 )
02803         {
02804             hidden2_list.resize(ith_sample_in_sequence+1, hidden_layer2->size);
02805             hidden2_act_no_bias_list.resize(ith_sample_in_sequence+1, hidden_layer2->size);
02806         }
02807                  
02808         input_list.resize(ith_sample_in_sequence+1);
02809         input_list[ith_sample_in_sequence].resize(input_layer->size);
02810 
02811         targets_list.resize( target_layers.length() );
02812         target_prediction_list.resize( target_layers.length() );
02813         target_prediction_act_no_bias_list.resize( target_layers.length() );
02814         for( int tar=0; tar < target_layers.length(); tar++ )
02815         {
02816             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
02817             {
02818                 targsize = target_layers[tar]->size;
02819                 targets_list[tar].resize( ith_sample_in_sequence+1, targsize);
02820                 //targets_list[tar][ith_sample_in_sequence].resize( target_layers[tar]->size);
02821                 target_prediction_list[tar].resize(
02822                     ith_sample_in_sequence+1, targsize);
02823                 target_prediction_act_no_bias_list[tar].resize(
02824                     ith_sample_in_sequence+1, targsize);
02825             }
02826         }
02827         nll_list.resize(ith_sample_in_sequence+1,target_layers.length());
02828         if( use_target_layers_masks )
02829         {
02830             masks_list.resize( target_layers.length() );
02831             for( int tar=0; tar < target_layers.length(); tar++ )
02832                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
02833                     masks_list[tar].resize( ith_sample_in_sequence+1, target_layers[tar]->size );
02834         }
02835 
02836         // Forward propagation
02837 
02838         // Fetch right representation for input
02839         clamp_units(input.subVec(0,inputsize_without_masks),
02840                     input_layer,
02841                     input_symbol_sizes);                
02842         input_list[ith_sample_in_sequence] << input_layer->expectation;
02843 
02844         // Fetch right representation for target
02845         sum_target_elements = 0;
02846         for( int tar=0; tar < target_layers.length(); tar++ )
02847         {
02848             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
02849             {
02850                 if( use_target_layers_masks )
02851                 {
02852                     Vec masks_list_tar_i = masks_list[tar](ith_sample_in_sequence);
02853                     clamp_units(target.subVec(
02854                                     sum_target_elements,
02855                                     target_layers_n_of_target_elements[tar]),
02856                                 target_layers[tar],
02857                                 target_symbol_sizes[tar],
02858                                 input.subVec(
02859                                     inputsize_without_masks 
02860                                     + sum_target_elements, 
02861                                     target_layers_n_of_target_elements[tar]),
02862                                 masks_list_tar_i
02863                         );
02864                     
02865                 }
02866                 else
02867                 {
02868                     clamp_units(target.subVec(
02869                                     sum_target_elements,
02870                                     target_layers_n_of_target_elements[tar]),
02871                                 target_layers[tar],
02872                                 target_symbol_sizes[tar]);
02873                 }
02874                 targets_list[tar](ith_sample_in_sequence) << 
02875                     target_layers[tar]->expectation;
02876             }
02877             sum_target_elements += target_layers_n_of_target_elements[tar];
02878         }
02879         
02880         Vec hidden_act_no_bias_i = hidden_act_no_bias_list(ith_sample_in_sequence);
02881         input_connections->fprop( input_list[ith_sample_in_sequence], 
02882                                   hidden_act_no_bias_i);
02883                 
02884         if( ith_sample_in_sequence > 0 && dynamic_connections )
02885         {
02886             dynamic_connections->fprop( 
02887                 hidden_list(ith_sample_in_sequence-1),
02888                 dynamic_act_no_bias_contribution );
02889 
02890             hidden_act_no_bias_list(ith_sample_in_sequence) += 
02891                 dynamic_act_no_bias_contribution;
02892         }
02893         
02894         Vec hidden_i = hidden_list(ith_sample_in_sequence);
02895         hidden_layer->fprop( hidden_act_no_bias_i, 
02896                              hidden_i );
02897 
02898         Vec last_hidden = hidden_i;
02899                  
02900         if( hidden_layer2 )
02901         {
02902             Vec hidden2_i = hidden2_list(ith_sample_in_sequence); 
02903             Vec hidden2_act_no_bias_i = hidden2_act_no_bias_list(ith_sample_in_sequence);
02904 
02905             hidden_connections->fprop( 
02906                 hidden2_i,
02907                 hidden2_act_no_bias_i);
02908 
02909             hidden_layer2->fprop( 
02910                 hidden2_act_no_bias_i,
02911                 hidden2_i 
02912                 );
02913 
02914             last_hidden = hidden2_i; // last hidden layer vec 
02915         }
02916            
02917        
02918         for( int tar=0; tar < target_layers.length(); tar++ )
02919         {
02920             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
02921             {
02922                 Vec target_prediction_i = target_prediction_list[tar](ith_sample_in_sequence);
02923                 Vec target_prediction_act_no_bias_i = target_prediction_act_no_bias_list[tar](ith_sample_in_sequence);
02924                 target_connections[tar]->fprop(
02925                     last_hidden,
02926                     target_prediction_act_no_bias_i
02927                     );
02928                 target_layers[tar]->fprop(
02929                     target_prediction_act_no_bias_i,
02930                     target_prediction_i );
02931                 if( use_target_layers_masks )
02932                     target_prediction_i *= masks_list[tar](ith_sample_in_sequence);
02933             }
02934         }
02935         
02936 
02937         
02938 
02939         sum_target_elements = 0;
02940         for( int tar=0; tar < target_layers.length(); tar++ )
02941         {
02942             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
02943             {
02944                 target_layers[tar]->activation << 
02945                     target_prediction_act_no_bias_list[tar](
02946                         ith_sample_in_sequence);
02947                 target_layers[tar]->activation += target_layers[tar]->bias;
02948                 target_layers[tar]->setExpectation(
02949                     target_prediction_list[tar](
02950                         ith_sample_in_sequence));
02951                 nll_list(ith_sample_in_sequence,tar) = 
02952                     target_layers[tar]->fpropNLL( 
02953                         targets_list[tar](ith_sample_in_sequence) ); 
02954 //                 costs[tar] += nll_list(ith_sample_in_sequence,tar);
02955                 
02956 //                 // Normalize by the number of things to predict
02957 //                 if( use_target_layers_masks )
02958 //                 {
02959 //                     n_items[tar] += sum(
02960 //                         input.subVec( inputsize_without_masks 
02961 //                                       + sum_target_elements, 
02962 //                                       target_layers_n_of_target_elements[tar]) );
02963 //                 }
02964 //                 else
02965 //                 n_items[tar]++;
02966             }
02967             if( use_target_layers_masks )
02968                 sum_target_elements += 
02969                     target_layers_n_of_target_elements[tar];
02970         }
02971         ith_sample_in_sequence++;
02972 
02973         
02974 
02975     }
02976 
02977 //     ith_sample_in_sequence = 0;
02978 //     hidden_list.resize(0);
02979 //     hidden_act_no_bias_list.resize(0);
02980 //     hidden2_list.resize(0);
02981 //     hidden2_act_no_bias_list.resize(0);
02982 //     target_prediction_list.resize(0);
02983 //     target_prediction_act_no_bias_list.resize(0);
02984 //     input_list.resize(0);
02985 //     targets_list.resize(0);
02986 //     nll_list.resize(0,0);
02987 //     masks_list.resize(0);   
02988 
02989     
02990     //Vec tempo;
02991     //TVec<real> tempo;
02992     //tempo.resize(visible_layer->size);
02993     ofstream myfile;
02994     myfile.open ("/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/test.txt");
02995     
02996     for (int i = 0; i < target_prediction_list[0].length(); i++)
02997     {
02999         for (int tar = 0; tar < target_layers.length(); tar++)
03000         {
03001             for (int j = 0; j < target_prediction_list[tar](i).length(); j++)
03002             {
03004                 myfile << target_prediction_list[tar](i)[j] << " ";
03007                 // disabled alternative: for i<=n, dump the clean targets instead
03008                 // myfile << targets_list[tar](i)[j] << " ";
03011             }
03012         }
03013         myfile << "\n";
03014     }
03015      
03016 
03017      myfile.close();
03018 
03019 }
03020 
03021 void DenoisingRecurrentNet::generateArtificial()
03022 {
03023     //PPath* the_filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/scoreGen.amat";
03024     data = new AutoVMatrix();
03025     //data->filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/listData/target_tm12_input_t_tm12_tp12/scoreGen_tar_tm12__in_tm12_tp12.amat";
03026     //data->filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/create_data/scoreGenSuitePerf.amat";
03027     //data->filename = "/home/stan/cvs/Gamme/expressive_data/dataGen.amat";
03028     data->filename = "/home/stan/Documents/recherche_maitrise/artificialData/generate/dataGen.amat";
03029     data->defineSizes(1,1,0);
03030     //data->defineSizes(163,16,0);
03031     //data->inputsize = 21;
03032     //data->targetsize = 0;
03033     //data->weightsize = 0;
03034     data->build();
03035 
03041     int len = data->length();
03042     int tarSize = outputsize();
03043     int partTarSize;
03044     Vec input;
03045     Vec target;
03046     real weight;
03047     int targsize;
03048 
03049     Vec output(outputsize());
03050     output.clear();
03051 //     Vec costs(nTestCosts());
03052 //     costs.clear();
03053 //     Vec n_items(nTestCosts());
03054 //     n_items.clear();
03055 
03057     use_target_layers_masks = false;
03058 
03059     int ith_sample_in_sequence = 0;
03060     int inputsize_without_masks = inputsize() 
03061         - ( use_target_layers_masks ? targetsize() : 0 );
03062     int sum_target_elements = 0;
03063     for (int i = 0; i < len; i++)
03064     {
03065         data->getExample(i, input, target, weight);
03066         /*if(i>n)
03067         {
03068             for (int k = 1; k <= t; k++)
03069             {
03070                 if(k<=i){
03071                     partTarSize = outputsize();
03072                     for( int tar=0; tar < target_layers.length(); tar++ )
03073                     {
03074                         
03075                         input.subVec(inputsize_without_masks-(tarSize*(t-k))-partTarSize-1,target_layers[tar]->size) << target_prediction_list[tar](ith_sample_in_sequence-k);
03076                         partTarSize -= target_layers[tar]->size;
03077                         
03078                         
03079                     }
03080                 }
03081             }       
03082             }*/
03083     
03084 
03085 //         for (int k = 1; k <= t; k++)
03086 //         {
03087 //             partTarSize = outputsize();
03088 //             for( int tar=0; tar < target_layers.length(); tar++ )
03089 //             {
03090 //                 if(i>=t){
03091 //                     input.subVec(inputsize_without_masks-(tarSize*(t-k))-partTarSize-1,target_layers[tar]->size) << target_prediction_list[tar](ith_sample_in_sequence-k);
03092 //                     partTarSize -= target_layers[tar]->size;
03093 //                 }
03094 //             }
03095 //         }
03096 
03097         if( fast_exact_is_equal(input[0],end_of_sequence_symbol) )
03098         {
03099 //             ith_sample_in_sequence = 0;
03100 //             hidden_list.resize(0);
03101 //             hidden_act_no_bias_list.resize(0);
03102 //             hidden2_list.resize(0);
03103 //             hidden2_act_no_bias_list.resize(0);
03104 //             target_prediction_list.resize(0);
03105 //             target_prediction_act_no_bias_list.resize(0);
03106 //             input_list.resize(0);
03107 //             targets_list.resize(0);
03108 //             nll_list.resize(0,0);
03109 //             masks_list.resize(0);
03110 
03111             
03112 
03113             continue;
03114         }
03115 
03116         // Resize internal variables
03117         hidden_list.resize(ith_sample_in_sequence+1, hidden_layer->size);
03118         hidden_act_no_bias_list.resize(ith_sample_in_sequence+1, hidden_layer->size);
03119         if( hidden_layer2 )
03120         {
03121             hidden2_list.resize(ith_sample_in_sequence+1, hidden_layer2->size);
03122             hidden2_act_no_bias_list.resize(ith_sample_in_sequence+1, hidden_layer2->size);
03123         }
03124                  
03125         input_list.resize(ith_sample_in_sequence+1);
03126         input_list[ith_sample_in_sequence].resize(input_layer->size);
03127 
03128         targets_list.resize( target_layers.length() );
03129         target_prediction_list.resize( target_layers.length() );
03130         target_prediction_act_no_bias_list.resize( target_layers.length() );
03131         for( int tar=0; tar < target_layers.length(); tar++ )
03132         {
03133             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
03134             {
03135                 targsize = target_layers[tar]->size;
03136                 targets_list[tar].resize( ith_sample_in_sequence+1, targsize);
03137                 //targets_list[tar][ith_sample_in_sequence].resize( target_layers[tar]->size);
03138                 target_prediction_list[tar].resize(
03139                     ith_sample_in_sequence+1, targsize);
03140                 target_prediction_act_no_bias_list[tar].resize(
03141                     ith_sample_in_sequence+1, targsize);
03142             }
03143         }
03144         nll_list.resize(ith_sample_in_sequence+1,target_layers.length());
03145         if( use_target_layers_masks )
03146         {
03147             masks_list.resize( target_layers.length() );
03148             for( int tar=0; tar < target_layers.length(); tar++ )
03149                 if( !fast_exact_is_equal(target_layers_weights[tar],0) )
03150                     masks_list[tar].resize( ith_sample_in_sequence+1, target_layers[tar]->size );
03151         }
03152 
03153         // Forward propagation
03154 
03155         // Fetch right representation for input
03156         clamp_units(input.subVec(0,inputsize_without_masks),
03157                     input_layer,
03158                     input_symbol_sizes);                
03159         input_list[ith_sample_in_sequence] << input_layer->expectation;
03160 
03161         // Fetch right representation for target
03162         sum_target_elements = 0;
03163         for( int tar=0; tar < target_layers.length(); tar++ )
03164         {
03165             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
03166             {
03167                 if( use_target_layers_masks )
03168                 {
03169                     Vec masks_list_tar_i = masks_list[tar](ith_sample_in_sequence);
03170                     clamp_units(target.subVec(
03171                                     sum_target_elements,
03172                                     target_layers_n_of_target_elements[tar]),
03173                                 target_layers[tar],
03174                                 target_symbol_sizes[tar],
03175                                 input.subVec(
03176                                     inputsize_without_masks 
03177                                     + sum_target_elements, 
03178                                     target_layers_n_of_target_elements[tar]),
03179                                 masks_list_tar_i
03180                         );
03181                     
03182                 }
03183                 else
03184                 {
03185                     clamp_units(target.subVec(
03186                                     sum_target_elements,
03187                                     target_layers_n_of_target_elements[tar]),
03188                                 target_layers[tar],
03189                                 target_symbol_sizes[tar]);
03190                 }
03191                 targets_list[tar](ith_sample_in_sequence) << 
03192                     target_layers[tar]->expectation;
03193             }
03194             sum_target_elements += target_layers_n_of_target_elements[tar];
03195         }
03196         
03197         Vec hidden_act_no_bias_i = hidden_act_no_bias_list(ith_sample_in_sequence);
03198         input_connections->fprop( input_list[ith_sample_in_sequence], 
03199                                   hidden_act_no_bias_i);
03200                 
03201         if( ith_sample_in_sequence > 0 && dynamic_connections )
03202         {
03203             dynamic_connections->fprop( 
03204                 hidden_list(ith_sample_in_sequence-1),
03205                 dynamic_act_no_bias_contribution );
03206 
03207             hidden_act_no_bias_list(ith_sample_in_sequence) += 
03208                 dynamic_act_no_bias_contribution;
03209         }
03210         
03211         Vec hidden_i = hidden_list(ith_sample_in_sequence);
03212         hidden_layer->fprop( hidden_act_no_bias_i, 
03213                              hidden_i );
03214 
03215         Vec last_hidden = hidden_i;
03216                  
03217         if( hidden_layer2 )
03218         {
03219             Vec hidden2_i = hidden2_list(ith_sample_in_sequence); 
03220             Vec hidden2_act_no_bias_i = hidden2_act_no_bias_list(ith_sample_in_sequence);
03221 
03222             hidden_connections->fprop( 
03223                 hidden2_i,
03224                 hidden2_act_no_bias_i);
03225 
03226             hidden_layer2->fprop( 
03227                 hidden2_act_no_bias_i,
03228                 hidden2_i 
03229                 );
03230 
03231             last_hidden = hidden2_i; // last hidden layer vec 
03232         }
03233            
03234        
03235         for( int tar=0; tar < target_layers.length(); tar++ )
03236         {
03237             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
03238             {
03239                 Vec target_prediction_i = target_prediction_list[tar](ith_sample_in_sequence);
03240                 Vec target_prediction_act_no_bias_i = target_prediction_act_no_bias_list[tar](ith_sample_in_sequence);
03241                 target_connections[tar]->fprop(
03242                     last_hidden,
03243                     target_prediction_act_no_bias_i
03244                     );
03245                 target_layers[tar]->fprop(
03246                     target_prediction_act_no_bias_i,
03247                     target_prediction_i );
03248                 if( use_target_layers_masks )
03249                     target_prediction_i *= masks_list[tar](ith_sample_in_sequence);
03250             }
03251         }
03252         
03253 
03254         
03255 
03256         sum_target_elements = 0;
03257         for( int tar=0; tar < target_layers.length(); tar++ )
03258         {
03259             if( !fast_exact_is_equal(target_layers_weights[tar],0) )
03260             {
03261                 target_layers[tar]->activation << 
03262                     target_prediction_act_no_bias_list[tar](
03263                         ith_sample_in_sequence);
03264                 target_layers[tar]->activation += target_layers[tar]->bias;
03265                 target_layers[tar]->setExpectation(
03266                     target_prediction_list[tar](
03267                         ith_sample_in_sequence));
03268                 nll_list(ith_sample_in_sequence,tar) = 
03269                     target_layers[tar]->fpropNLL( 
03270                         targets_list[tar](ith_sample_in_sequence) ); 
03271 //                 costs[tar] += nll_list(ith_sample_in_sequence,tar);
03272                 
03273 //                 // Normalize by the number of things to predict
03274 //                 if( use_target_layers_masks )
03275 //                 {
03276 //                     n_items[tar] += sum(
03277 //                         input.subVec( inputsize_without_masks 
03278 //                                       + sum_target_elements, 
03279 //                                       target_layers_n_of_target_elements[tar]) );
03280 //                 }
03281 //                 else
03282 //                 n_items[tar]++;
03283             }
03284             if( use_target_layers_masks )
03285                 sum_target_elements += 
03286                     target_layers_n_of_target_elements[tar];
03287         }
03288         ith_sample_in_sequence++;
03289 
03290         
03291 
03292     }
03293 
03294 //     ith_sample_in_sequence = 0;
03295 //     hidden_list.resize(0);
03296 //     hidden_act_no_bias_list.resize(0);
03297 //     hidden2_list.resize(0);
03298 //     hidden2_act_no_bias_list.resize(0);
03299 //     target_prediction_list.resize(0);
03300 //     target_prediction_act_no_bias_list.resize(0);
03301 //     input_list.resize(0);
03302 //     targets_list.resize(0);
03303 //     nll_list.resize(0,0);
03304 //     masks_list.resize(0);   
03305 
03306     
03307     //Vec tempo;
03308     //TVec<real> tempo;
03309     //tempo.resize(visible_layer->size);
03310     ofstream myfile;
03311     myfile.open ("/home/stan/Documents/recherche_maitrise/artificialData/generate/generationResult.txt");
03312     
03313     for (int i = 0; i < target_prediction_list[0].length(); i++)
03314     {
03316         for (int tar = 0; tar < target_layers.length(); tar++)
03317         {
03318             for (int j = 0; j < target_prediction_list[tar](i).length(); j++)
03319             {
03321                 // write the prediction and the clean target side by side
03322                 myfile << target_prediction_list[tar](i)[j] << " ";
03323                 myfile << targets_list[tar](i)[j] << " ";
03329             }
03330         }
03331         myfile << "\n";
03332     }
03333      
03334 
03335      myfile.close();
03336 
03337 }
03338 
03339 
03340 
03341 
03342 
03343 /*
03344 void DenoisingRecurrentNet::gen()
03345 {
03346     //PPath* the_filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/scoreGen.amat";
03347     data = new AutoVMatrix();
03348     data->filename = "/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/scoreGen.amat";
03349     data->defineSizes(21,0,0);
03350     //data->inputsize = 21;
03351     //data->targetsize = 0;
03352     //data->weightsize = 0;
03353     data->build();
03354 
03355     
03356     int len = data->length();
03357     Vec score;
03358     Vec target;
03359     real weight;
03360     Vec bias_tempo;
03361     Vec visi_bias_tempo;
03362    
03363    
03364     
03365     previous_hidden_layer.resize(hidden_layer->size);
03366     connections_idem = connections;
03367 
03368     for (int ith_sample = 0; ith_sample < len ; ith_sample++ ){
03369         
03370         data->getExample(ith_sample, score, target, weight);
03371         //score << data(ith_sample);
03372         input_prediction_list.resize(
03373             ith_sample+1,visible_layer->size);
03374         if(ith_sample > 0)
03375         {
03376             
03377             //input_list(ith_sample_in_sequence) << previous_input;
03378             //h*_{t-1}
03380             dynamic_connections->fprop(previous_hidden_layer, cond_bias);
03381             hidden_layer->setAllBias(cond_bias); 
03382             
03383             
03384             
03385             //up phase
03386             connections->setAsDownInput( input_prediction_list(ith_sample-1) );
03387             hidden_layer->getAllActivations( connections_idem );
03388             hidden_layer->computeExpectation();
03390             
03391             //previous_hidden_layer << hidden_layer->expectation;//h_{t-2} on the next iteration
03392             //previous_hidden_layer_act_no_bias << hidden_layer->activation;
03393             
03394             
03395             //h*_{t}
03397             if(dynamic_connections_copy)
03398                 dynamic_connections_copy->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
03399             else
03400                 dynamic_connections->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
03401             //dynamic_connections_copy->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
03402             hidden_layer->expectation_is_not_up_to_date();
03403             hidden_layer->computeExpectation();//h_{t}
03405             
03406             //previous_input << visible_layer->expectation;//v_{t-1}
03407             
03408         }
03409         else
03410         {
03411             
03412             previous_hidden_layer.clear();//h_{t-1}
03413             if(dynamic_connections_copy)
03414                 dynamic_connections_copy->fprop( previous_hidden_layer ,
03415                                                  hidden_layer->activation);//connection between h_{t-1} and h_{t}
03416             else
03417                 dynamic_connections->fprop(previous_hidden_layer,
03418                                            hidden_layer->activation);//connection between h_{t-1} and h_{t}
03419             
03420             hidden_layer->expectation_is_not_up_to_date();
03421             hidden_layer->computeExpectation();//h_{t}
03422             //previous_input.resize(data->inputsize);
03423             //previous_input << data(ith_sample);
03424             
03425         }
03426         
03427         //connections_transpose->setAsDownInput( hidden_layer->expectation );
03428         //visible_layer->getAllActivations( connections_idem_t );
03429         
03430         connections->setAsUpInput( hidden_layer->expectation );
03431         visible_layer->getAllActivations( connections_idem );
03432         
03433         visible_layer->computeExpectation();
03434         //visible_layer->generateSample();
03435         partition(score.subVec(14,taillePart), visible_layer->activation.subVec(14+taillePart,taillePart), visible_layer->activation.subVec(14+(taillePart*2),taillePart));
03436         partition(score.subVec(14,taillePart), visible_layer->expectation.subVec(14+taillePart,taillePart), visible_layer->expectation.subVec(14+(taillePart*2),taillePart));
03437 
03438 
03439         visible_layer->activation.subVec(0,14+taillePart) << score;
03440         visible_layer->expectation.subVec(0,14+taillePart) << score;
03441 
03442         input_prediction_list(ith_sample) << visible_layer->expectation;
03443         
03444     }
03445     
03446     //Vec tempo;
03447     TVec<real> tempo;
03448     tempo.resize(visible_layer->size);
03449     ofstream myfile;
03450     myfile.open ("/home/stan/Documents/recherche_maitrise/DDBN_bosendorfer/data/generate/test.txt");
03451     
03452     for (int i = 0; i < len ; i++ ){
03453         tempo << input_prediction_list(i);
03454         
03455         //cout << tempo[2] << endl;
03456        
03457         for (int j = 0; j < tempo.length() ; j++ ){
03458             
03459             
03460                 
03461                 
03462                myfile << tempo[j] << " ";
03463                
03464 
03465                
03466            
03467         }
03468         myfile << "\n";
03469     }
03470      
03471 
03472      myfile.close();
03473 
03474 }*/
03475 //void DenoisingRecurrentNet::generate(int nbNotes)
03476 //{
03477 //    
03478 //    previous_hidden_layer.resize(hidden_layer->size);
03479 //    connections_idem = connections;
03480 //
03481 //    for (int ith_sample = 0; ith_sample < nbNotes ; ith_sample++ ){
03482 //        
03483 //        input_prediction_list.resize(
03484 //            ith_sample+1,visible_layer->size);
03485 //        if(ith_sample > 0)
03486 //        {
03487 //            
03488 //            //input_list(ith_sample_in_sequence) << previous_input;
03489 //            //h*_{t-1}
03490 //            //////////////////////////////////
03491 //            dynamic_connections->fprop(previous_hidden_layer, cond_bias);
03492 //            hidden_layer->setAllBias(cond_bias); //**************************
03493 //            
03494 //            
03495 //            
03496 //            //up phase
03497 //            connections->setAsDownInput( input_prediction_list(ith_sample-1) );
03498 //            hidden_layer->getAllActivations( connections_idem );
03499 //            hidden_layer->computeExpectation();
03500 //            //////////////////////////////////
03501 //            
03502 //            //previous_hidden_layer << hidden_layer->expectation;//h_{t-2} on the next iteration//******************************
03503 //            //previous_hidden_layer_act_no_bias << hidden_layer->activation;
03504 //            
03505 //            
03506 //            //h*_{t}
03507 //            ////////////
03508 //            if(dynamic_connections_copy)
03509 //                dynamic_connections_copy->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
03510 //            else
03511 //                dynamic_connections->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
03512 //            //dynamic_connections_copy->fprop( hidden_layer->expectation ,hidden_layer->activation);//connection between h_{t-1} and h_{t}
03513 //            hidden_layer->expectation_is_not_up_to_date();
03514 //            hidden_layer->computeExpectation();//h_{t}
03515 //            ///////////
03516 //            
03517 //            //previous_input << visible_layer->expectation;//v_{t-1}
03518 //            
03519 //        }
03520 //        else
03521 //        {
03522 //            
03523 //            previous_hidden_layer.clear();//h_{t-1}
03524 //            if(dynamic_connections_copy)
03525 //                dynamic_connections_copy->fprop( previous_hidden_layer ,
03526 //                                                 hidden_layer->activation);//connection between h_{t-1} and h_{t}
03527 //            else
03528 //                dynamic_connections->fprop(previous_hidden_layer,
03529 //                                           hidden_layer->activation);//connection between h_{t-1} and h_{t}
03530 //            
03531 //            hidden_layer->expectation_is_not_up_to_date();
03532 //            hidden_layer->computeExpectation();//h_{t}
03533 //            
03534 //            
03535 //        }
03536 //        
03537 //        //connections_transpose->setAsDownInput( hidden_layer->expectation );
03538 //        //visible_layer->getAllActivations( connections_idem_t );
03539 //        
03540 //        connections->setAsUpInput( hidden_layer->expectation );
03541 //        visible_layer->getAllActivations( connections_idem );
03542 //        
03543 //        visible_layer->computeExpectation();
03544 //        visible_layer->generateSample();
03545 //        
03546 //        input_prediction_list(ith_sample) << visible_layer->sample;
03547 //        
03548 //    }
03549 //    
03550 //    //Vec tempo;
03551 //    TVec<int> tempo;
03552 //    tempo.resize(visible_layer->size);
03553 //    int theNote;
03554 //    //int nbNoteVisiLayer = input_prediction_list(1).length()/13;
03555 //    ofstream myfile;
03556 //    int theLayer;
03557 //    myfile.open ("/home/stan/Documents/recherche_maitrise/DDBN_musicGeneration/data/generate/test.txt");
03558 //    
03559 //    for (int i = 0; i < nbNotes ; i++ ){
03560 //        tempo << input_prediction_list(i);
03561 //        
03562 //        //cout << tempo[2] << endl;
03563 //       
03564 //        for (int j = 0; j < tempo.length() ; j++ ){
03565 //            
03566 //            if (tempo[j] == 1){
03567 //                theLayer = (j/13);
03568 //                
03569 //                theNote = j - (13*theLayer);
03570 //               
03571 //
03572 //                if (theNote<=11){
03573 //                    //print theNote
03574 //                    //cout << theNote+50 << " ";
03575 //                    myfile << theNote << " ";
03576 //                }
03577 //                else{
03578 //                    //print #
03579 //                    //cout << "# ";
03580 //                    myfile << "# ";
03581 //                    
03582 //                }
03583 //     
03584 //            }
03585 //           
03586 //        }
03587 //        myfile << "\n";
03588 //    }
03589 //     myfile << "<oov> <oov> \n";
03590 //
03591 //     myfile.close();
03592 //
03593 //}
03594 
03595 } // end of namespace PLearn
03596 
03597 
03598 /*
03599   Local Variables:
03600   mode:c++
03601   c-basic-offset:4
03602   c-file-style:"stroustrup"
03603   c-file-offsets:((innamespace . 0)(inline-open . 0))
03604   indent-tabs-mode:nil
03605   fill-column:79
03606   End:
03607 */
03608 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :