PLearn 0.1
NeuralProbabilisticLanguageModel.cc
00001 // -*- C++ -*-
00002 
00003 // NeuralProbabilisticLanguageModel.cc
00004 // Copyright (c) 1998-2002 Pascal Vincent
00005 // Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
00006 // Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
00007 //
00008 // Redistribution and use in source and binary forms, with or without
00009 // modification, are permitted provided that the following conditions are met:
00010 // 
00011 //  1. Redistributions of source code must retain the above copyright
00012 //     notice, this list of conditions and the following disclaimer.
00013 // 
00014 //  2. Redistributions in binary form must reproduce the above copyright
00015 //     notice, this list of conditions and the following disclaimer in the
00016 //     documentation and/or other materials provided with the distribution.
00017 // 
00018 //  3. The name of the authors may not be used to endorse or promote
00019 //     products derived from this software without specific prior written
00020 //     permission.
00021 // 
00022 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00023 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00024 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00025 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00026 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00027 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00028 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00029 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00030 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00031 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00032 // 
00033 // This file is part of the PLearn library. For more information on the PLearn
00034 // library, go to the PLearn Web site at www.plearn.org
00035 
00039 #include "NeuralProbabilisticLanguageModel.h"
00040 #include <plearn/vmat/SubVMatrix.h>
00041 //#include <plearn/sys/Profiler.h>
00042 #include <time.h>
00043 #include <stdio.h>
00044 
00045 namespace PLearn {
00046 using namespace std;
00047 
00048 PLEARN_IMPLEMENT_OBJECT(NeuralProbabilisticLanguageModel, 
00049                         "Feedforward neural network for language modeling",
00050                         "Implementation of the Neural Probabilistic Language "
00051                         "Model proposed by \n"
00052                         "Bengio, Ducharme, Vincent and Jauvin (JMLR 2003), "
00053                         "with extensions to speed up\n"
00054                         "the model (Bengio and Sénécal, AISTATS 2003) and "
00055                         "to include prior information\n"
00056                         "about the distributed representation and permit "
00057                         "generalization of these\n"
00058                         "distributed representations to out-of-vocabulary "
00059                         "words using features \n"
00060                         "(Larochelle and Bengio, Tech Report 2006).\n");
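
// Illustrative usage sketch (editor addition, not part of the original source).
// The learner is driven through the generic PLearner interface; the option
// names are those declared in declareOptions() below, but the values, the
// feature sets and the training VMat are made up for illustration.
//
//     PP<NeuralProbabilisticLanguageModel> nplm =
//         new NeuralProbabilisticLanguageModel();
//     nplm->nhidden = 100;                    // size of first hidden layer
//     nplm->dist_rep_dim = 50;                // size of word representations
//     nplm->output_transfer_func = "softmax";
//     nplm->cost_funcs = TVec<string>(1, "NLL");
//     nplm->feat_sets = my_feature_sets;      // TVec< PP<FeatureSet> > (hypothetical)
//     nplm->build();
//     nplm->setTrainingSet(train_vmat);       // VMat with Dictionary-backed fields
//     nplm->train();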
00061 
00062 NeuralProbabilisticLanguageModel::NeuralProbabilisticLanguageModel() 
00063 // DEFAULT VALUES FOR ALL OPTIONS
00064     :
00065 rgen(new PRandom()),
00066 nhidden(0),
00067 nhidden2(0),
00068 weight_decay(0),
00069 bias_decay(0),
00070 layer1_weight_decay(0),
00071 layer1_bias_decay(0),
00072 layer2_weight_decay(0),
00073 layer2_bias_decay(0),
00074 output_layer_weight_decay(0),
00075 output_layer_bias_decay(0),
00076 direct_in_to_out_weight_decay(0),
00077 output_layer_dist_rep_weight_decay(0),
00078 output_layer_dist_rep_bias_decay(0),
00079 fixed_output_weights(0),
00080 direct_in_to_out(0),
00081 penalty_type("L2_square"),
00082 output_transfer_func(""),
00083 hidden_transfer_func("tanh"),
00084 start_learning_rate(0.01),
00085 decrease_constant(0),
00086 batch_size(1),
00087 stochastic_gradient_descent_speedup(true),
00088 initialization_method("uniform_linear"),
00089 dist_rep_dim(-1),
00090 possible_targets_vary(false),
00091 train_proposal_distribution(true),
00092 sampling_block_size(50),
00093 minimum_effective_sample_size(100)
00094 {}
00095 
00096 NeuralProbabilisticLanguageModel::~NeuralProbabilisticLanguageModel()
00097 {
00098 }
00099 
00100 void NeuralProbabilisticLanguageModel::declareOptions(OptionList& ol)
00101 {
00102     declareOption(ol, "nhidden", &NeuralProbabilisticLanguageModel::nhidden, 
00103                   OptionBase::buildoption, 
00104                   "Number of hidden units in first hidden layer (0 means no "
00105                   "hidden layer).\n");
00106     
00107     declareOption(ol, "nhidden2", &NeuralProbabilisticLanguageModel::nhidden2, 
00108                   OptionBase::buildoption, 
00109                   "Number of hidden units in second hidden layer (0 means no "
00110                   "hidden layer).\n");
00111     
00112     declareOption(ol, "weight_decay", 
00113                   &NeuralProbabilisticLanguageModel::weight_decay, 
00114                   OptionBase::buildoption, 
00115                   "Global weight decay for all layers.\n");
00116     
00117     declareOption(ol, "bias_decay", &NeuralProbabilisticLanguageModel::bias_decay,
00118                   OptionBase::buildoption, 
00119                   "Global bias decay for all layers.\n");
00120     
00121     declareOption(ol, "layer1_weight_decay", 
00122                   &NeuralProbabilisticLanguageModel::layer1_weight_decay, 
00123                   OptionBase::buildoption, 
00124                   "Additional weight decay for the first hidden layer. "
00125                   "Is added to weight_decay.\n");
00126     
00127     declareOption(ol, "layer1_bias_decay", 
00128                   &NeuralProbabilisticLanguageModel::layer1_bias_decay, 
00129                   OptionBase::buildoption, 
00130                   "Additional bias decay for the first hidden layer. "
00131                   "Is added to bias_decay.\n");
00132     
00133     declareOption(ol, "layer2_weight_decay", 
00134                   &NeuralProbabilisticLanguageModel::layer2_weight_decay, 
00135                   OptionBase::buildoption, 
00136                   "Additional weight decay for the second hidden layer. "
00137                   "Is added to weight_decay.\n");
00138     
00139     declareOption(ol, "layer2_bias_decay", 
00140                   &NeuralProbabilisticLanguageModel::layer2_bias_decay, 
00141                   OptionBase::buildoption, 
00142                   "Additional bias decay for the second hidden layer. "
00143                   "Is added to bias_decay.\n");
00144     
00145     declareOption(ol, "output_layer_weight_decay", 
00146                   &NeuralProbabilisticLanguageModel::output_layer_weight_decay, 
00147                   OptionBase::buildoption, 
00148                   "Additional weight decay for the output layer. "
00149                   "Is added to 'weight_decay'.\n");
00150     
00151     declareOption(ol, "output_layer_bias_decay", 
00152                   &NeuralProbabilisticLanguageModel::output_layer_bias_decay, 
00153                   OptionBase::buildoption, 
00154                   "Additional bias decay for the output layer. "
00155                   "Is added to 'bias_decay'.\n");
00156     
00157     declareOption(ol, "direct_in_to_out_weight_decay", 
00158                   &NeuralProbabilisticLanguageModel::direct_in_to_out_weight_decay,
00159                   OptionBase::buildoption,
00160                   "Additional weight decay for the weights going from the "
00161                   "input directly to the \n output layer.  Is added to "
00162                   "'weight_decay'.\n");
00163     
00164     declareOption(ol, "output_layer_dist_rep_weight_decay", 
00165                   &NeuralProbabilisticLanguageModel::output_layer_dist_rep_weight_decay, 
00166                   OptionBase::buildoption, 
00167                   "Additional weight decay for the output layer of the distributed "
00168                   "representation\n"
00169                   "predictor.  Is added to 'weight_decay'.\n");
00170     
00171     declareOption(ol, "output_layer_dist_rep_bias_decay", 
00172                   &NeuralProbabilisticLanguageModel::output_layer_dist_rep_bias_decay, 
00173                   OptionBase::buildoption, 
00174                   "Additional bias decay for the output layer of the distributed "
00175                   "representation\n"
00176                   "predictor.  Is added to 'bias_decay'.\n");
00177     
00178     declareOption(ol, "fixed_output_weights", 
00179                   &NeuralProbabilisticLanguageModel::fixed_output_weights, 
00180                   OptionBase::buildoption, 
00181                   "If true then the output weights are not learned. They are "
00182                   "initialized to +1 or -1 randomly.\n");
00183     
00184     declareOption(ol, "direct_in_to_out", 
00185                   &NeuralProbabilisticLanguageModel::direct_in_to_out, 
00186                   OptionBase::buildoption, 
00187                   "If true then direct input to output weights will be added "
00188                   "(if nhidden > 0).\n");
00189     
00190     declareOption(ol, "penalty_type", 
00191                   &NeuralProbabilisticLanguageModel::penalty_type,
00192                   OptionBase::buildoption,
00193                   "Penalty to use on the weights (for weight and bias decay).\n"
00194                   "Can be any of:\n"
00195                   "  - \"L1\": L1 norm,\n"
00196                   "  - \"L2_square\" (default): square of the L2 norm.\n");
00197     
00198     declareOption(ol, "output_transfer_func", 
00199                   &NeuralProbabilisticLanguageModel::output_transfer_func, 
00200                   OptionBase::buildoption, 
00201                   "What transfer function to use for the output layer? One of: \n"
00202                   "  - \"tanh\" \n"
00203                   "  - \"sigmoid\" \n"
00204                   "  - \"softmax\" \n"
00205                   "An empty string or \"none\" means no output transfer function \n");
00206     
00207     declareOption(ol, "hidden_transfer_func", 
00208                   &NeuralProbabilisticLanguageModel::hidden_transfer_func, 
00209                   OptionBase::buildoption, 
00210                   "What transfer function to use for hidden units? One of \n"
00211                   "  - \"linear\" \n"
00212                   "  - \"tanh\" \n"
00213                   "  - \"sigmoid\" \n"
00214                   "  - \"softmax\" \n");
00215     
00216     declareOption(ol, "cost_funcs", &NeuralProbabilisticLanguageModel::cost_funcs, 
00217                   OptionBase::buildoption, 
00218                   "A list of cost functions to use\n"
00219                   "in the form \"[ cf1; cf2; cf3; ... ]\" where each function "
00220                   "is one of: \n"
00221                   "  - \"NLL\" (negative log likelihood -log(p[c]) for "
00222                   "classification) \n"
00223                   "  - \"class_error\" (classification error) \n"
00224                   "The FIRST function of the list will be used as \n"
00225                   "the objective function to optimize \n"
00226                   "(possibly with an added weight decay penalty) \n");
00227     
00228     declareOption(ol, "start_learning_rate", 
00229                   &NeuralProbabilisticLanguageModel::start_learning_rate, 
00230                   OptionBase::buildoption, 
00231                   "Start learning rate of gradient descent.\n");
00232                   
00233     declareOption(ol, "decrease_constant", 
00234                   &NeuralProbabilisticLanguageModel::decrease_constant, 
00235                   OptionBase::buildoption, 
00236                   "Decrease constant of gradient descent.\n");
00237 
00238     declareOption(ol, "batch_size", 
00239                   &NeuralProbabilisticLanguageModel::batch_size, 
00240                   OptionBase::buildoption, 
00241                   "How many samples to use to estimate the average gradient before updating the weights\n"
00242                   "0 is equivalent to specifying training_set->length() \n");
00243 
00244     declareOption(ol, "stochastic_gradient_descent_speedup", 
00245                   &NeuralProbabilisticLanguageModel::stochastic_gradient_descent_speedup, 
00246                   OptionBase::buildoption, 
00247                   "Indication that a trick to speed up stochastic "
00248                   "gradient descent\n"
00249                   "should be used.\n");
00250 
00251     declareOption(ol, "initialization_method", 
00252                   &NeuralProbabilisticLanguageModel::initialization_method, 
00253                   OptionBase::buildoption, 
00254                   "The method used to initialize the weights:\n"
00255                   " - \"normal_linear\"  = a normal law with variance "
00256                   "1/n_inputs\n"
00257                   " - \"normal_sqrt\"    = a normal law with variance "
00258                   "1/sqrt(n_inputs)\n"
00259                   " - \"uniform_linear\" = a uniform law in [-1/n_inputs,"
00260                   "1/n_inputs]\n"
00261                   " - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(n_inputs),"
00262                   "1/sqrt(n_inputs)]\n"
00263                   " - \"zero\"           = all weights are set to 0\n");
00264     
00265     declareOption(ol, "dist_rep_dim", 
00266                   &NeuralProbabilisticLanguageModel::dist_rep_dim, 
00267                   OptionBase::buildoption, 
00268                   "Dimensionality (number of components) of distributed "
00269                   "representations.\n"
00270                   "If <= 0, then distributed representations will not be used.\n"
00271         );
00272     
00273     declareOption(ol, "possible_targets_vary", 
00274                   &NeuralProbabilisticLanguageModel::possible_targets_vary, 
00275                   OptionBase::buildoption, 
00276                   "Indication that the set of possible targets varies from\n"
00277                   "one input vector to another.\n"
00278         );
00279     
00280     declareOption(ol, "feat_sets", &NeuralProbabilisticLanguageModel::feat_sets, 
00281                                 OptionBase::buildoption, 
00282                   "FeatureSets to apply to the input. The number of feature\n"
00283                   "sets should be a divisor of inputsize(). The feature\n"
00284                   "set applied to the ith input field is the feature\n"
00285                   "set at position i % feat_sets.length().\n"
00286         );
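    // Editor note (illustrative example, not in the original source): with
    // n_feat_sets = 2 and inputsize() = 6 -- e.g. three context words, each
    // represented by two input fields -- fields 0..5 are processed by feature
    // sets 0,1,0,1,0,1 respectively.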
00287 
00288     declareOption(ol, "train_proposal_distribution", 
00289                   &NeuralProbabilisticLanguageModel::train_proposal_distribution, 
00290                   OptionBase::buildoption, 
00291                   "Indication that the proposal distribution must be trained\n"
00292                   "(using train_set).\n"
00293         );
00294 
00295     declareOption(ol, "sampling_block_size", 
00296                   &NeuralProbabilisticLanguageModel::sampling_block_size, 
00297                   OptionBase::buildoption, 
00298                   "Size of the sampling blocks.\n"
00299         );
00300 
00301     declareOption(ol, "minimum_effective_sample_size", 
00302                   &NeuralProbabilisticLanguageModel::minimum_effective_sample_size, 
00303                   OptionBase::buildoption, 
00304                   "Minimum effective sample size.\n"
00305         );
00306 
00307     declareOption(ol, "train_set", &NeuralProbabilisticLanguageModel::train_set, 
00308                   OptionBase::learntoption, 
00309                   "VMatrix used for training, which also provides information about the data (e.g. Dictionary objects for the different fields).\n");
00310 
00311 
00312     // The network's learnt parameters
00313     declareOption(ol, "w1", &NeuralProbabilisticLanguageModel::w1, 
00314                   OptionBase::learntoption, 
00315                   "Weights of first hidden layer.\n");
00316     declareOption(ol, "b1", &NeuralProbabilisticLanguageModel::b1, 
00317                   OptionBase::learntoption, 
00318                   "Bias of first hidden layer.\n");
00319     declareOption(ol, "w2", &NeuralProbabilisticLanguageModel::w2, 
00320                   OptionBase::learntoption, 
00321                   "Weights of second hidden layer.\n");
00322     declareOption(ol, "b2", &NeuralProbabilisticLanguageModel::b2, 
00323                   OptionBase::learntoption, 
00324                   "Bias of second hidden layer.\n");
00325     declareOption(ol, "wout", &NeuralProbabilisticLanguageModel::wout, 
00326                   OptionBase::learntoption, 
00327                   "Weights of output layer.\n");
00328     declareOption(ol, "bout", &NeuralProbabilisticLanguageModel::bout, 
00329                   OptionBase::learntoption, 
00330                   "Bias of output layer.\n");
00331     declareOption(ol, "direct_wout", 
00332                   &NeuralProbabilisticLanguageModel::direct_wout, 
00333                   OptionBase::learntoption, 
00334                   "Direct input to output weights.\n");
00335     declareOption(ol, "direct_bout", 
00336                   &NeuralProbabilisticLanguageModel::direct_bout, 
00337                   OptionBase::learntoption, 
00338                   "Direct input to output bias.\n");
00339     declareOption(ol, "wout_dist_rep", 
00340                   &NeuralProbabilisticLanguageModel::wout_dist_rep, 
00341                   OptionBase::learntoption, 
00342                   "Weights of output layer for distributed representation "
00343                   "predictor.\n");
00344     declareOption(ol, "bout_dist_rep", 
00345                   &NeuralProbabilisticLanguageModel::bout_dist_rep, 
00346                   OptionBase::learntoption, 
00347                   "Bias of output layer for distributed representation "
00348                   "predictor.\n");
00349 
00350     inherited::declareOptions(ol);
00351 
00352 }
00353 
00355 // build //
00357 void NeuralProbabilisticLanguageModel::build()
00358 {
00359     inherited::build();
00360     build_();
00361 }
00362 
00363 
00365 // build_ //
00367 void NeuralProbabilisticLanguageModel::build_()
00368 {
00369     // Don't do anything if we don't have a train_set
00370     // Only the train_set knows the inputsize, targetsize and weightsize
00371 
00372     if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
00373     {
00374         if(targetsize_ != 1)
00375             PLERROR("In NeuralProbabilisticLanguageModel::build_(): "
00376                     "targetsize_ must be 1, not %d",targetsize_);
00377 
00378         n_feat_sets = feat_sets.length();
00379 
00380         if(n_feat_sets == 0)
00381             PLERROR("In NeuralProbabilisticLanguageModel::build_(): "
00382                     "at least one FeatureSet must be provided\n");
00383         
00384         if(inputsize_ % n_feat_sets != 0)
00385             PLERROR("In NeuralProbabilisticLanguageModel::build_(): "
00386                     "feat_sets.length() must be a divisor of inputsize()");
00387         
00388         // Process penalty type option
00389         string pt = lowerstring( penalty_type );
00390         if( pt == "l1" )
00391             penalty_type = "L1";
00392         else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
00393             penalty_type = "L2_square";
00394         else if( pt == "l2" )
00395         {
00396             PLWARNING("In NeuralProbabilisticLanguageModel::build_(): "
00397                       "L2 penalty not supported, assuming you want L2 square");
00398             penalty_type = "L2_square";
00399         }
00400         else
00401             PLERROR("In NeuralProbabilisticLanguageModel::build_(): "
00402                     "penalty_type \"%s\" not supported", penalty_type.c_str());
00403         
00404         int ncosts = cost_funcs.size();  
00405         if(ncosts<=0)
00406             PLERROR("In NeuralProbabilisticLanguageModel::build_(): "
00407                     "Empty cost_funcs : must at least specify the cost "
00408                     "function to optimize!");
00409         
00410         if(stage <= 0 ) // Training hasn't started
00411         {
00412             // Initialize parameters
00413             initializeParams();                        
00414         }
00415         
00416         output_comp.resize(total_output_size);
00417         row.resize(train_set->width());
00418         row.fill(MISSING_VALUE);
00419         feats.resize(inputsize_);
00420         // Make sure that all feats[i] have non-null storage...
00421         for(int i=0; i<feats.length(); i++)
00422         {
00423             feats[i].resize(1);
00424             feats[i].resize(0);
00425         }
00426         if(fixed_output_weights && stochastic_gradient_descent_speedup)
00427             PLERROR("In NeuralProbabilisticLanguageModel::build_(): "
00428                     "cannot use stochastic gradient descent speedup with "
00429                     "fixed output weights");
00430         val_string_reference_set = train_set;
00431         target_values_reference_set = train_set;
00432 
00433         if(proposal_distribution)
00434         {
00435             if(batch_size != 1)
00436                 PLERROR("In NeuralProbabilisticLanguageModel::build_(): "
00437                         "importance sampling speedup is not implemented for "
00438                         "batch size != 1");
00439             sample.resize(1);            
00440             if(train_proposal_distribution)
00441             {
00442                 proposal_distribution->setTrainingSet(train_set);
00443                 proposal_distribution->train();
00444             }
00445         }
00446     }
00447 }
00448 
00449 void NeuralProbabilisticLanguageModel::fprop(const Vec& inputv, Vec& outputv, 
00450                                              const Vec& targetv, Vec& costsv, 
00451                                              real sampleweight) const
00452 {
00453     
00454     fpropOutput(inputv,outputv);
00455     //if(is_missing(outputv[0]))
00456     //    cout << "Missing value in outputv[0]" << endl;
00457     fpropCostsFromOutput(inputv, outputv, targetv, costsv, sampleweight);
00458     //if(is_missing(costsv[0]))
00459     //    cout << "Missing value in costsv[0]" << endl;
00460 
00461 }
00462 
00463 void NeuralProbabilisticLanguageModel::fpropOutput(const Vec& inputv, 
00464                                                    Vec& outputv) const
00465 {
00466     // Forward propagation up to (but excluding) the output weights; sets last_layer
00467     fpropBeforeOutputWeights(inputv);
00468     
00469     if(dist_rep_dim > 0) // x -> d(x)
00470     {        
00471         // d(x),h1(d(x)),h2(h1(d(x))) -> o(x)
00472 
00473         add_affine_transform(last_layer,wout,bout,outputv,false,
00474                              possible_targets_vary,target_values);            
00475         if(direct_in_to_out && nhidden>0)
00476             add_affine_transform(nnet_input,direct_wout,direct_bout,
00477                                  outputv,false,possible_targets_vary,
00478                                  target_values);
00479     }
00480     else
00481     {
00482         // x, h1(x),h2(h1(x)) -> o(x)
00483         add_affine_transform(last_layer,wout,bout,outputv,nhidden<=0,
00484                              possible_targets_vary,target_values);            
00485         if(direct_in_to_out && nhidden>0)
00486             add_affine_transform(feat_input,direct_wout,direct_bout,
00487                                  outputv,true,possible_targets_vary,
00488                                  target_values);
00489     }
00490                                
00491     if (nhidden2>0 && nhidden<=0)
00492         PLERROR("NeuralProbabilisticLanguageModel::fprop(): "
00493                 "can't have nhidden2 (=%d) > 0 while nhidden=0",nhidden2);
00494     
00495     if(output_transfer_func!="" && output_transfer_func!="none")
00496        add_transfer_func(outputv, output_transfer_func);
00497 }
00498 
00499 void NeuralProbabilisticLanguageModel::fpropBeforeOutputWeights(
00500     const Vec& inputv) const
00501 {
00502     // Get possible target values
00503     if(possible_targets_vary) 
00504     {
00505         row.subVec(0,inputsize_) << inputv;
00506         target_values_reference_set->getValues(row,inputsize_,target_values);
00507         outputv.resize(target_values.length());
00508     }
00509 
00510     // Get features
00511     ni = inputsize_;
00512     nfeats = 0;
00513     for(int i=0; i<ni; i++)
00514     {
00515         str = val_string_reference_set->getValString(i,inputv[i]);
00516         feat_sets[i%n_feat_sets]->getFeatures(str,feats[i]);
00517         nfeats += feats[i].length();
00518     }
00519     
00520     feat_input.resize(nfeats);
00521     offset = 0;
00522     id = 0;
00523     for(int i=0; i<ni; i++)
00524     {
00525         f = feats[i].data();
00526         nj = feats[i].length();
00527         for(int j=0; j<nj; j++)
00528             feat_input[id++] = offset + *f++;
00529         if(dist_rep_dim <= 0 || ((i+1) % n_feat_sets != 0))
00530             offset += feat_sets[i % n_feat_sets]->size();
00531         else
00532             offset = 0;
00533     }
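    // Editor note (not in the original source): feat_input now holds sparse
    // feature indices.  Each field's active features are shifted by the
    // cumulative sizes of the preceding feature sets; when dist_rep_dim > 0
    // the offset is reset after every n_feat_sets fields, so every word
    // indexes into the same shared wout_dist_rep matrix.  For example
    // (hypothetical sizes), with two feature sets of sizes 10 and 5, field 0
    // uses offsets 0..9, field 1 uses 10..14, and field 2 starts again at 0
    // (or at 15 when distributed representations are not used).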
00534 
00535     // Fprop up to output weights
00536     if(dist_rep_dim > 0) // x -> d(x)
00537     {        
00538         nfeats = 0;
00539         id = 0;
00540         for(int i=0; i<inputsize_;)
00541         {
00542             ifeats = 0;
00543             for(int j=0; j<n_feat_sets; j++,i++)
00544                 ifeats += feats[i].length();
00545             
00546             add_affine_transform(feat_input.subVec(nfeats,ifeats),
00547                                  wout_dist_rep, bout_dist_rep,
00548                                  nnet_input.subVec(id*dist_rep_dim,dist_rep_dim),
00549                                       true, false);
00550             nfeats += ifeats;
00551             id++;
00552         }
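        // Editor note (not in the original source): nnet_input now holds the
        // concatenation of the dist_rep_dim-dimensional representations of the
        // inputsize_/n_feat_sets context words, i.e. the projection layer of
        // the Bengio et al. model.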
00553 
00554         if(nhidden>0) // d(x) -> h1(d(x))
00555         {
00556             add_affine_transform(nnet_input,w1,b1,hiddenv,false,false);
00557             add_transfer_func(hiddenv);
00558 
00559             if(nhidden2>0) // h1(d(x)) -> h2(h1(d(x)))
00560             {
00561                 add_affine_transform(hiddenv,w2,b2,hidden2v,false,false);
00562                 add_transfer_func(hidden2v);
00563                 last_layer = hidden2v;
00564             }
00565             else
00566                 last_layer = hiddenv;
00567         }
00568         else
00569             last_layer = nnet_input;
00570 
00571     }
00572     else
00573     {        
00574         if(nhidden>0) // x -> h1(x)
00575         {
00576             add_affine_transform(feat_input,w1,b1,hiddenv,true,false);
00577             // Transfer function
00578             add_transfer_func(hiddenv);
00579 
00580             if(nhidden2>0) // h1(x) -> h2(h1(x))
00581             {
00582                 add_affine_transform(hiddenv,w2,b2,hidden2v,true,false);
00583                 add_transfer_func(hidden2v);
00584                 last_layer = hidden2v;
00585             }
00586             else
00587                 last_layer = hiddenv;
00588         }
00589         else
00590             last_layer = feat_input;
00591     }
00592 }
00593 
00594 void NeuralProbabilisticLanguageModel::fpropCostsFromOutput(const Vec& inputv, const Vec& outputv, const Vec& targetv, Vec& costsv, real sampleweight) const
00595 {
00596     //Compute cost
00597     // Compute cost
00598     if(possible_targets_vary)
00599     {
00600         reind_target = target_values.find(targetv[0]);
00601         if(reind_target<0)
00602             PLERROR("In NeuralProbabilisticLanguageModel::fprop(): target %d is not in possible targets", (int)targetv[0]);
00603     }
00604     else
00605         reind_target = (int)targetv[0];
00606 
00607     // Build cost function
00608 
00609     int ncosts = cost_funcs.size();
00610     for(int k=0; k<ncosts; k++)
00611     {
00612         if(cost_funcs[k]=="NLL") 
00613         {
00614             costsv[k] = sampleweight*nll(outputv,reind_target);
00615         }
00616         else if(cost_funcs[k]=="class_error")
00617             costsv[k] = sampleweight*classification_loss(outputv, reind_target);
00618         else 
00619             PLERROR("In NeuralProbabilisticLanguageModel::fprop(): "
00620                     "unknown cost_func option: %s",cost_funcs[k].c_str());        
00621     }
00622 }
00623 
00624 void NeuralProbabilisticLanguageModel::bprop(Vec& inputv, Vec& outputv, 
00625                                              Vec& targetv, Vec& costsv, 
00626                                              real learning_rate, 
00627                                              real sampleweight)
00628 {
00629     if(possible_targets_vary) 
00630     {
00631         gradient_outputv.resize(target_values.length());
00632         gradient_act_outputv.resize(target_values.length());
00633         if(!stochastic_gradient_descent_speedup)
00634             target_values_since_last_update.append(target_values);
00635     }
00636 
00637     if(!stochastic_gradient_descent_speedup)
00638         feats_since_last_update.append(feat_input);
00639 
00640     // Gradient through cost
00641     if(cost_funcs[0]=="NLL") 
00642     {
00643         // For softmax, the 1/p[target] factor is folded into gradient_transfer_func(), which avoids numerical precision errors
00644         if(output_transfer_func == "softmax")
00645             gradient_outputv[reind_target] = learning_rate*sampleweight;
00646         else
00647             gradient_outputv[reind_target] = learning_rate*sampleweight/(outputv[reind_target]);            
00648     }
00649     else if(cost_funcs[0]=="class_error")
00650     {
00651         PLERROR("NeuralProbabilisticLanguageModel::bprop(): gradient "
00652                 "cannot be computed for \"class_error\" cost");
00653     }
00654 
00655     // Gradient through output transfer function
00656     if(output_transfer_func != "linear")
00657     {
00658         if(cost_funcs[0]=="NLL" && output_transfer_func == "softmax")
00659             gradient_transfer_func(outputv,gradient_act_outputv, gradient_outputv,
00660                                     output_transfer_func, reind_target);
00661         else
00662             gradient_transfer_func(outputv,gradient_act_outputv, gradient_outputv,
00663                                     output_transfer_func);
00664         gradient_last_layer = gradient_act_outputv;
00665     }
00666     else
00667         gradient_last_layer = gradient_act_outputv;
00668     
00669     // Gradient through output affine transform
00670 
00671 
00672     if(nhidden2 > 0) {
00673         gradient_affine_transform(hidden2v, wout, bout, gradient_hidden2v, 
00674                                   gradient_wout, gradient_bout, 
00675                                   gradient_last_layer,
00676                                   false, possible_targets_vary, 
00677                                   learning_rate*sampleweight, 
00678                                   weight_decay+output_layer_weight_decay,
00679                                   bias_decay+output_layer_bias_decay,
00680                                   target_values);
00681     }
00682     else if(nhidden > 0) 
00683     {
00684         gradient_affine_transform(hiddenv, wout, bout, gradient_hiddenv,
00685                                   gradient_wout, gradient_bout, 
00686                                   gradient_last_layer,
00687                                   false, possible_targets_vary, 
00688                                   learning_rate*sampleweight, 
00689                                   weight_decay+output_layer_weight_decay,
00690                                   bias_decay+output_layer_bias_decay, 
00691                                   target_values);
00692     }
00693     else
00694     {
00695         gradient_affine_transform(nnet_input, wout, bout, gradient_nnet_input, 
00696                                   gradient_wout, gradient_bout, 
00697                                   gradient_last_layer,
00698                                   (dist_rep_dim <= 0), possible_targets_vary, 
00699                                   learning_rate*sampleweight, 
00700                                   weight_decay+output_layer_weight_decay,
00701                                   bias_decay+output_layer_bias_decay, 
00702                                   target_values);
00703     }
00704 
00705 
00706     if(nhidden>0 && direct_in_to_out)
00707     {
00708         gradient_affine_transform(nnet_input, direct_wout, direct_bout,
00709                                   gradient_nnet_input, 
00710                                   gradient_direct_wout, gradient_direct_bout,
00711                                   gradient_last_layer,
00712                                   dist_rep_dim<=0, possible_targets_vary,
00713                                   learning_rate*sampleweight, 
00714                                   weight_decay+direct_in_to_out_weight_decay,
00715                                   0,
00716                                   target_values);
00717     }
00718 
00719 
00720     if(nhidden2 > 0)
00721     {
00722         gradient_transfer_func(hidden2v,gradient_act_hidden2v,gradient_hidden2v);
00723         gradient_affine_transform(hiddenv, w2, b2, gradient_hiddenv, 
00724                                   gradient_w2, gradient_b2, gradient_act_hidden2v,
00725                                   false, false,learning_rate*sampleweight, 
00726                                   weight_decay+layer2_weight_decay,
00727                                   bias_decay+layer2_bias_decay);
00728     }
00729     if(nhidden > 0)
00730     {
00731         gradient_transfer_func(hiddenv,gradient_act_hiddenv,gradient_hiddenv);  
00732         gradient_affine_transform(nnet_input, w1, b1, gradient_nnet_input, 
00733                                   gradient_w1, gradient_b1, gradient_act_hiddenv,
00734                                   dist_rep_dim<=0, false,learning_rate*sampleweight, 
00735                                   weight_decay+layer1_weight_decay,
00736                                   bias_decay+layer1_bias_decay);
00737     }
00738 
00739     if(dist_rep_dim > 0)
00740     {
00741         nfeats = 0;
00742         id = 0;
00743         for(int i=0; i<inputsize_; )
00744         {
00745             ifeats = 0;
00746             for(int j=0; j<n_feat_sets; j++,i++)
00747                 ifeats += feats[i].length();
00748             gradient_affine_transform(feat_input.subVec(nfeats,ifeats),
00749                                       wout_dist_rep, bout_dist_rep,
00750                                       //gradient_feat_input.subVec(nfeats,feats[i].length()),
00751                                       gradient_feat_input,// Useless anyways...
00752                                       gradient_wout_dist_rep,
00753                                       gradient_bout_dist_rep,
00754                                       gradient_nnet_input.subVec(
00755                                           id*dist_rep_dim,dist_rep_dim),
00756                                       true, false, learning_rate*sampleweight, 
00757                                       weight_decay+
00758                                       output_layer_dist_rep_weight_decay,
00759                                       bias_decay+output_layer_dist_rep_bias_decay);
00760             nfeats += ifeats;
00761             id++;
00762         }
00763     }
00764 
00765     clearProppathGradient();
00766 }
00767 
00768 void NeuralProbabilisticLanguageModel::bpropBeforeOutputWeights(
00769     real learning_rate, 
00770     real sampleweight)
00771 {
00772 }
00773 
00774 
00775 void NeuralProbabilisticLanguageModel::update()
00776 {
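    // Editor note (not in the original source): update() folds the gradients
    // accumulated by bprop() into the parameters.  It appears to be exercised
    // only when stochastic_gradient_descent_speedup is false, since bprop()
    // records feats_since_last_update / target_values_since_last_update only
    // in that case; update_affine_transform() then touches only the weight
    // rows (features seen) and columns (candidate targets seen) since the
    // last update.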
00777 
00778     if(dist_rep_dim > 0)
00779     {
00780         update_affine_transform(feats_since_last_update, wout_dist_rep, 
00781                                 bout_dist_rep, gradient_wout_dist_rep,
00782                                 gradient_bout_dist_rep, true, false,
00783                                 target_values_since_last_update);
00784     }
00785 
00786     if(nhidden>0) 
00787     {
00788         update_affine_transform(feats_since_last_update, w1, b1, 
00789                                 gradient_w1, gradient_b1,
00790                                 dist_rep_dim<=0, false,
00791                                 target_values_since_last_update);
00792         if(nhidden2>0) 
00793         {
00794             update_affine_transform(feats_since_last_update, w2, b2, 
00795                                     gradient_w2, gradient_b2,
00796                                     false, false,
00797                                     target_values_since_last_update);
00798         }
00799 
00800         update_affine_transform(feats_since_last_update, wout, bout, 
00801                                 gradient_wout, gradient_bout,
00802                                 false, possible_targets_vary,
00803                                 target_values_since_last_update);
00804         if(direct_in_to_out)
00805         {
00806             update_affine_transform(feats_since_last_update, direct_wout, 
00807                                     direct_bout, 
00808                                     gradient_direct_wout, gradient_direct_bout,
00809                                     false, possible_targets_vary,
00810                                     target_values_since_last_update);
00811         }
00812     }
00813     else
00814     {
00815         update_affine_transform(feats_since_last_update, wout, bout, 
00816                                 gradient_wout, gradient_bout,
00817                                 dist_rep_dim<=0, possible_targets_vary,
00818                                 target_values_since_last_update);
00819     }
00820 
00821     feats_since_last_update.resize(0);
00822     target_values_since_last_update.resize(0);
00823 }
00824 
00825 void NeuralProbabilisticLanguageModel::update_affine_transform(
00826     Vec input, Mat weights, Vec bias,
00827     Mat gweights, Vec gbias,
00828     bool input_is_sparse, bool output_is_sparse,
00829     Vec output_indices) 
00830 {
00831     // Bias
00832     if(bias.length() != 0)
00833     {
00834         if(output_is_sparse)
00835         {
00836             pval1 = gbias.data();
00837             pval2 = bias.data();
00838             pval3 = output_indices.data();
00839             ni = output_indices.length();
00840             for(int i=0; i<ni; i++)
00841             {
00842                 pval2[(int)*pval3] += pval1[(int)*pval3];
00843                 pval1[(int)*pval3] = 0;
00844                 pval3++;
00845             }
00846         }
00847         else
00848         {
00849             pval1 = gbias.data();
00850             pval2 = bias.data();
00851             ni = bias.length();
00852             for(int i=0; i<ni; i++)
00853             {
00854                 *pval2 += *pval1;
00855                 *pval1 = 0;
00856                 pval1++; 
00857                 pval2++;
00858             }
00859         }
00860     }
00861 
00862     // Weights
00863     if(!input_is_sparse && !output_is_sparse)
00864     {
00865         if(!gweights.isCompact() || !weights.isCompact())
00866             PLERROR("In NeuralProbabilisticLanguageModel::"
00867                     "update_affine_transform(): weights or gweights is "
00868                     "not a compact TMat");
00869         ni = weights.length();
00870         nj = weights.width();
00871         pval1 = gweights.data();
00872         pval2 = weights.data();
00873         for(int i=0; i<ni; i++)
00874             for(int j=0; j<nj; j++)
00875             {
00876                 *pval2 += *pval1;
00877                 *pval1 = 0;
00878                 pval1++;
00879                 pval2++;
00880             }
00881     }
00882     else if(!input_is_sparse && output_is_sparse)
00883     {
00884         ni = output_indices.length();
00885         nj = input.length();
00886         pval3 = output_indices.data();
00887         for(int i=0; i<ni; i++)
00888         {
00889             for(int j=0; j<nj; j++)
00890             {
00891                 weights(j,(int)*pval3) += gweights(j,(int)*pval3);
00892                 gweights(j,(int)*pval3) = 0;
00893             }
00894             pval3++;
00895         }
00896     }
00897     else if(input_is_sparse && !output_is_sparse)
00898     {
00899         ni = input.length();
00900         nj = weights.width();
00901         pval3 = input.data();
00902         for(int i=0; i<ni; i++)
00903         {
00904             pval1 = gweights[(int)(*pval3)];
00905             pval2 = weights[(int)(*pval3++)];
00906             for(int j=0; j<nj;j++)
00907             {
00908                 *pval2 += *pval1;
00909                 *pval1 = 0;
00910                 pval1++;
00911                 pval2++;
00912             }
00913         }
00914     }
00915     else if(input_is_sparse && output_is_sparse)
00916     {
00917         // Weights
00918         ni = input.length();
00919         nj = output_indices.length();
00920         pval2 = input.data();
00921         for(int i=0; i<ni; i++)
00922         {
00923             pval3 = output_indices.data();
00924             for(int j=0; j<nj; j++)
00925             {
00926                 weights((int)(*pval2),(int)*pval3) += 
00927                     gweights((int)(*pval2),(int)*pval3);
00928                 gweights((int)(*pval2),(int)*pval3) = 0;
00929                 pval3++;
00930             }
00931             pval2++;
00932         }
00933     }
00934 }
00935 
00937 void NeuralProbabilisticLanguageModel::clearProppathGradient()
00938 {
00939     // Trick to make clearProppathGradient faster...
00940     if(cost_funcs[0]=="NLL") 
00941         gradient_outputv[reind_target] = 0;
00942     else
00943         gradient_outputv.clear();
00944     gradient_act_outputv.clear();
00945     
00946     if(dist_rep_dim>0)
00947         gradient_nnet_input.clear();
00948 
00949     if(nhidden>0) 
00950     {
00951         gradient_hiddenv.clear();
00952         gradient_act_hiddenv.clear();
00953         if(nhidden2>0) 
00954         {
00955             gradient_hidden2v.clear();
00956             gradient_act_hidden2v.clear();
00957         }
00958     }
00959 }
00960 
00961 
00963 // computeCostsFromOutputs //
00965 void NeuralProbabilisticLanguageModel::computeCostsFromOutputs(const Vec& inputv, 
00966                                                                const Vec& outputv,
00967                                                                const Vec& targetv,
00968                                                                Vec& costsv) const
00969 {
00970     PLERROR("In NeuralProbabilisticLanguageModel::computeCostsFromOutputs(): "
00971             "output is not enough to compute costs");
00972 }
00973 
00974 int NeuralProbabilisticLanguageModel::my_argmax(const Vec& vec, 
00975                                                 int default_compare) const
00976 {
00977 #ifdef BOUNDCHECK
00978     if(vec.length()==0)
00979         PLERROR("In NeuralProbabilisticLanguageModel::my_argmax(): vec has zero length");
00980 #endif
00981     real* v = vec.data();
00982     int indexmax = default_compare;
00983     real maxval = v[default_compare];
00984     for(int i=0; i<vec.length(); i++)
00985         if(v[i]>maxval)
00986         {
00987             maxval = v[i];
00988             indexmax = i;
00989         }
00990     return indexmax;
00991 }
00992 
00994 // computeOutput //
00996 void NeuralProbabilisticLanguageModel::computeOutput(const Vec& inputv, 
00997                                                      Vec& outputv) const
00998 {
00999     fpropOutput(inputv, output_comp);
01000     if(possible_targets_vary)
01001     {
01002         //row.subVec(0,inputsize_) << inputv;
01003         //target_values_reference_set->getValues(row,inputsize_,target_values);
01004         outputv[0] = target_values[
01005             my_argmax(output_comp,rgen->uniform_multinomial_sample(
01006                           output_comp.length()))];
01007     }
01008     else
01009         outputv[0] = argmax(output_comp);
01010 }
01011 
01013 // computeOutputAndCosts //
01015 void NeuralProbabilisticLanguageModel::computeOutputAndCosts(const Vec& inputv, 
01016                                                              const Vec& targetv, 
01017                                                              Vec& outputv, 
01018                                                              Vec& costsv) const
01019 {
01020     fprop(inputv,output_comp,targetv,costsv);
01021     if(possible_targets_vary)
01022     {
01023         //row.subVec(0,inputsize_) << inputv;
01024         //target_values_reference_set->getValues(row,inputsize_,target_values);
01025         outputv[0] = 
01026             target_values[
01027                 my_argmax(output_comp,rgen->uniform_multinomial_sample(
01028                               output_comp.length()))];
01029     }
01030     else
01031         outputv[0] = argmax(output_comp);
01032 }
01033 
01035 // fillWeights //
01037 void NeuralProbabilisticLanguageModel::fillWeights(const Mat& weights) {
01038     if (initialization_method == "zero") {
01039         weights.clear();
01040         return;
01041     }
01042     real delta;
01043     int is = weights.length();
01044     if (initialization_method.find("linear") != string::npos)
01045         delta = 1.0 / real(is);
01046     else
01047         delta = 1.0 / sqrt(real(is));
01048     if (initialization_method.find("normal") != string::npos)
01049         rgen->fill_random_normal(weights, 0, delta);
01050     else
01051         rgen->fill_random_uniform(weights, -delta, delta);
01052 }
01053 
01055 // forget //
01057 void NeuralProbabilisticLanguageModel::forget()
01058 {
01059     if (train_set) build();
01060     total_updates=0;
01061     stage = 0;
01062 }
01063 
01065 // getTrainCostNames //
01067 TVec<string> NeuralProbabilisticLanguageModel::getTrainCostNames() const
01068 {
01069     return cost_funcs;
01070 }
01071 
01073 // getTestCostNames //
01075 TVec<string> NeuralProbabilisticLanguageModel::getTestCostNames() const
01076 { 
01077     return cost_funcs;
01078 }
01079 
01081 // add_transfer_func //
01083 void NeuralProbabilisticLanguageModel::add_transfer_func(const Vec& input, 
01084                                                          string transfer_func) 
01085     const
01086 {
01087     if (transfer_func == "default")
01088         transfer_func = hidden_transfer_func;
01089     if(transfer_func=="linear")
01090         return;
01091     else if(transfer_func=="tanh")
01092     {
01093         compute_tanh(input,input);
01094         return;
01095     }        
01096     else if(transfer_func=="sigmoid")
01097     {
01098         compute_sigmoid(input,input);
01099         return;
01100     }
01101     else if(transfer_func=="softmax")
01102     {
01103         compute_softmax(input,input);
01104         return;
01105     }
01106     else PLERROR("In NeuralProbabilisticLanguageModel::add_transfer_func(): "
01107                  "Unknown value for transfer_func: %s",transfer_func.c_str());
01108 }
01109 
01111 // gradient_transfer_func //
01113 void NeuralProbabilisticLanguageModel::gradient_transfer_func(
01114     Vec& output, 
01115     Vec& gradient_input,
01116     Vec& gradient_output,
01117     string transfer_func,
01118     int nll_softmax_speed_up_target) 
01119 {
01120     if (transfer_func == "default")        
01121         transfer_func = hidden_transfer_func;
01122     if(transfer_func=="linear")
01123     {
01124         pval1 = gradient_output.data();
01125         pval2 = gradient_input.data();
01126         ni = output.length();
01127         for(int i=0; i<ni; i++)
01128             *pval2++ += *pval1++;
01129         return;
01130     }
01131     else if(transfer_func=="tanh")
01132     {
01133         pval1 = gradient_output.data();
01134         pval2 = output.data();
01135         pval3 = gradient_input.data();
01136         ni = output.length();
01137         for(int i=0; i<ni; i++)
01138             *pval3++ += (*pval1++)*(1.0-square(*pval2++));
01139         return;
01140     }        
01141     else if(transfer_func=="sigmoid")
01142     {
01143         pval1 = gradient_output.data();
01144         pval2 = output.data();
01145         pval3 = gradient_input.data();
01146         ni = output.length();
01147         for(int i=0; i<ni; i++)
01148         {
01149             *pval3++ += (*pval1++)*(*pval2)*(1.0-*pval2);
01150             pval2++;
01151         }   
01152         return;
01153     }
01154     else if(transfer_func=="softmax")
01155     {
01156         if(nll_softmax_speed_up_target<0)
01157         {            
01158             pval3 = gradient_input.data();
01159             ni = nk = output.length();
01160             for(int i=0; i<ni; i++)
01161             {
01162                 val = output[i];
01163                 pval1 = gradient_output.data();
01164                 pval2 = output.data();
01165                 for(int k=0; k<nk; k++)
01166                     if(k!=i)
01167                         *pval3 -= *pval1++ * val * (*pval2++);
01168                     else
01169                     {
01170                         *pval3 += *pval1++ * val * (1.0-val);
01171                         pval2++;
01172                     }
01173                 pval3++;                
01174             }   
01175         }
01176         else // Permits speedup and avoids numerical precision errors
01177         {
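            // Editor's derivation sketch (not in the original source): with
            // p = softmax(a) and cost = -log p[t], d cost / d a_i = p_i - 1{i==t}.
            // bprop() stored grad = learning_rate*sampleweight in
            // gradient_output[t], so accumulating
            //     gradient_input[i] += grad * (1{i==t} - p_i)
            // yields the scaled descent direction directly, without forming
            // the full softmax Jacobian and without dividing by p[t].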
01178             pval2 = output.data();
01179             pval3 = gradient_input.data();
01180             ni = output.length();
01181             grad = gradient_output[nll_softmax_speed_up_target];
01182             val = output[nll_softmax_speed_up_target];
01183             for(int i=0; i<ni; i++)
01184             {
01185                 if(nll_softmax_speed_up_target!=i)
01186                     //*pval3++ -= grad * val * (*pval2++);
01187                     *pval3++ -= grad * (*pval2++);
01188                 else
01189                 {
01190                     //*pval3++ += grad * val * (1.0-val);
01191                     *pval3++ += grad * (1.0-val);
01192                     pval2++;
01193                 }
01194             }   
01195         }
01196         return;
01197     }
01198     else PLERROR("In NeuralProbabilisticLanguageModel::gradient_transfer_func():"
01199                  "Unknown value for transfer_func: %s",transfer_func.c_str());
01200 }
01201 
01202 void NeuralProbabilisticLanguageModel::add_affine_transform(
01203     Vec input, 
01204     Mat weights, 
01205     Vec bias, Vec output, 
01206     bool input_is_sparse, bool output_is_sparse,
01207     Vec output_indices) const
01208 {
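    // Editor note (not in the original source): when input_is_sparse, `input`
    // holds the indices of active (implicitly 1-valued) features, so the
    // product reduces to summing the corresponding rows of `weights`; when
    // output_is_sparse, only the columns listed in output_indices (the
    // candidate target words) are computed.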
01209     // Bias
01210     if(bias.length() != 0)
01211     {
01212         if(output_is_sparse)
01213         {
01214             pval1 = output.data();
01215             pval2 = bias.data();
01216             pval3 = output_indices.data();
01217             ni = output.length();
01218             for(int i=0; i<ni; i++)
01219                 *pval1++ = pval2[(int)*pval3++];
01220         }
01221         else
01222         {
01223             pval1 = output.data();
01224             pval2 = bias.data();
01225             ni = output.length();
01226             for(int i=0; i<ni; i++)
01227                 *pval1++ = *pval2++;
01228         }
01229     }
01230 
01231     // Weights
01232     if(!input_is_sparse && !output_is_sparse)
01233     {
01234         transposeProductAcc(output,weights,input);
01235     }
01236     else if(!input_is_sparse && output_is_sparse)
01237     {
01238         ni = output.length();
01239         nj = input.length();
01240         pval1 = output.data();
01241         pval3 = output_indices.data();
01242         for(int i=0; i<ni; i++)
01243         {
01244             pval2 = input.data();
01245             for(int j=0; j<nj; j++)
01246                 *pval1 += (*pval2++)*weights(j,(int)*pval3);
01247             pval1++;
01248             pval3++;
01249         }
01250     }
01251     else if(input_is_sparse && !output_is_sparse)
01252     {
01253         ni = input.length();
01254         nj = output.length();
01255         if(ni != 0)
01256         {
01257             pval3 = input.data();
01258             for(int i=0; i<ni; i++)
01259             {
01260                 pval1 = output.data();
01261                 pval2 = weights[(int)(*pval3++)];
01262                 for(int j=0; j<nj;j++)
01263                     *pval1++ += *pval2++;
01264             }
01265         }
01266     }
01267     else if(input_is_sparse && output_is_sparse)
01268     {
01269         // Weights
01270         ni = input.length();
01271         nj = output.length();
01272         if(ni != 0)
01273         {
01274             pval2 = input.data();
01275             for(int i=0; i<ni; i++)
01276             {
01277                 pval1 = output.data();
01278                 pval3 = output_indices.data();
01279                 for(int j=0; j<nj; j++)
01280                     *pval1++ += weights((int)(*pval2),(int)*pval3++);
01281                 pval2++;
01282             }
01283         }
01284     }
01285 }
01286 
01287 void NeuralProbabilisticLanguageModel::gradient_affine_transform(
01288     Vec input, Mat weights, Vec bias, 
01289     Vec ginput, Mat gweights, Vec gbias,
01290     Vec goutput, bool input_is_sparse, 
01291     bool output_is_sparse,
01292     real learning_rate,
01293     real weight_decay, real bias_decay,
01294     Vec output_indices)
01295 {
01296     // Bias
01297     if(bias.length() != 0)
01298     {
01299         if(output_is_sparse)
01300         {
01301             pval1 = gbias.data();
01302             pval2 = goutput.data();
01303             pval3 = output_indices.data();
01304             ni = goutput.length();
01305             
01306             if(fast_exact_is_equal(bias_decay, 0))
01307             {
01308                 // Without bias decay
01309                 for(int i=0; i<ni; i++)
01310                     pval1[(int)*pval3++] += *pval2++;
01311             }
01312             else
01313             {
01314                 // With bias decay
01315                 if(penalty_type == "L2_square")
01316                 {
01317                     pval4 = bias.data();
01318                     val = -two(learning_rate)*bias_decay;
01319                     for(int i=0; i<ni; i++)
01320                     {
01321                         pval1[(int)*pval3] += *pval2++ + val*(pval4[(int)*pval3]);
01322                         pval3++;
01323                     }
01324                 }
01325                 else if(penalty_type == "L1")
01326                 {
01327                     pval4 = bias.data();
01328                     val = -learning_rate*bias_decay;
01329                     for(int i=0; i<ni; i++)
01330                     {
01331                         val2 = pval4[(int)*pval3];
01332                         if(val2 > 0 )
01333                             pval1[(int)*pval3] += *pval2 + val;
01334                         else if(val2 < 0)
01335                             pval1[(int)*pval3] += *pval2 - val;
01336                         pval2++;
01337                         pval3++;
01338                     }
01339                 }
01340             }
01341         }
01342         else
01343         {
01344             pval1 = gbias.data();
01345             pval2 = goutput.data();
01346             ni = goutput.length();
01347             if(fast_exact_is_equal(bias_decay, 0))
01348             {
01349                 // Without bias decay
01350                 for(int i=0; i<ni; i++)
01351                     *pval1++ += *pval2++;
01352             }
01353             else
01354             {
01355                 // With bias decay
01356                 if(penalty_type == "L2_square")
01357                 {
01358                     pval3 = bias.data();
01359                     val = -two(learning_rate)*bias_decay;
01360                     for(int i=0; i<ni; i++)
01361                     {
01362                         *pval1++ += *pval2++ + val * (*pval3++);
01363                     }
01364                 }
01365                 else if(penalty_type == "L1")
01366                 {
01367                     pval3 = bias.data();
01368                     val = -learning_rate*bias_decay;
01369                     for(int i=0; i<ni; i++)
01370                     {
01371                         if(*pval3 > 0)
01372                             *pval1 += *pval2 + val;
01373                         else if(*pval3 < 0)
01374                             *pval1 += *pval2 - val;
01375                         pval1++;
01376                         pval2++;
01377                         pval3++;
01378                     }
01379                 }
01380             }
01381         }
01382     }
01383 
01384     // Weights and input (when appropriate)
01385     if(!input_is_sparse && !output_is_sparse)
01386     {        
01387         // Input
01388         //productAcc(ginput, weights, goutput);
01389         // Weights
01390         //externalProductAcc(gweights, input, goutput);
01391 
01392         // Equivalent, but faster, code that limits the number of
01393         // memory accesses
01394 
01395         ni = input.length();
01396         nj = goutput.length();
01397         pval3 = ginput.data();
01398         pval5 = input.data();
01399         
01400         if(fast_exact_is_equal(weight_decay, 0))
01401         {
01402             // Without weight decay
01403             for(int i=0; i<ni; i++) {
01404                 
01405                 pval1 = goutput.data();
01406                 pval2 = weights[i];
01407                 pval4 = gweights[i];
01408                 for(int j=0; j<nj; j++) {
01409                     *pval3 += *pval2 * (*pval1);
01410                     *pval4 += *pval5 * (*pval1);
01411                     pval1++;
01412                     pval2++;
01413                     pval4++;
01414                 }
01415                 pval3++;
01416                 pval5++;
01417             }   
01418         }
01419         else
01420         {
01421             //With weight decay            
01422             if(penalty_type == "L2_square")
01423             {
01424                 val = -two(learning_rate)*weight_decay;
01425                 for(int i=0; i<ni; i++) {   
01426                     pval1 = goutput.data();
01427                     pval2 = weights[i];
01428                     pval4 = gweights[i];
01429                     for(int j=0; j<nj; j++) {
01430                         *pval3 += *pval2 * (*pval1);
01431                         *pval4 += *pval5 * (*pval1) + val * (*pval2);
01432                         pval1++;
01433                         pval2++;
01434                         pval4++;
01435                     }
01436                     pval3++;
01437                     pval5++;
01438                 }
01439             }
01440             else if(penalty_type == "L1")
01441             {
01442                 val = -learning_rate*weight_decay;
01443                 for(int i=0; i<ni; i++) {
01444                     
01445                     pval1 = goutput.data();
01446                     pval2 = weights[i];
01447                     pval4 = gweights[i];
01448                     for(int j=0; j<nj; j++) {
01449                         *pval3 += *pval2 * (*pval1);
01450                         if(*pval2 > 0)
01451                             *pval4 += *pval5 * (*pval1) + val;
01452                         else if(*pval2 < 0)
01453                             *pval4 += *pval5 * (*pval1) - val;
01454                         pval1++;
01455                         pval2++;
01456                         pval4++;
01457                     }
01458                     pval3++;
01459                     pval5++;
01460                 }
01461             }
01462         }
01463     }
01464     else if(!input_is_sparse && output_is_sparse)
01465     {
01466         ni = goutput.length();
01467         nj = input.length();
01468         pval1 = goutput.data();
01469         pval3 = output_indices.data();
01470         
01471         if(fast_exact_is_equal(weight_decay, 0))
01472         {
01473             // Without weight decay
01474             for(int i=0; i<ni; i++)
01475             {
01476                 pval2 = input.data();
01477                 pval4 = ginput.data();
01478                 for(int j=0; j<nj; j++)
01479                 {
01480                     // Input
01481                     *pval4++ += weights(j,(int)(*pval3))*(*pval1);
01482                     // Weights
01483                     gweights(j,(int)(*pval3)) += (*pval2++)*(*pval1);
01484                 }
01485                 pval1++;
01486                 pval3++;
01487             }
01488         }
01489         else
01490         {
01491             // With weight decay
01492             if(penalty_type == "L2_square")
01493             {
01494                 val = -two(learning_rate)*weight_decay;
01495                 for(int i=0; i<ni; i++)
01496                 {
01497                     pval2 = input.data();
01498                     pval4 = ginput.data();
01499                     for(int j=0; j<nj; j++)
01500                     {
01501                         val2 = weights(j,(int)(*pval3));
01502                         // Input
01503                         *pval4++ += val2*(*pval1);
01504                         // Weights
01505                         gweights(j,(int)(*pval3)) += (*pval2++)*(*pval1) 
01506                             + val*val2;
01507                     }
01508                     pval1++;
01509                     pval3++;
01510                 }
01511             }
01512             else if(penalty_type == "L1")
01513             {
01514                 val = -learning_rate*weight_decay;
01515                 for(int i=0; i<ni; i++)
01516                 {
01517                     pval2 = input.data();
01518                     pval4 = ginput.data();
01519                     for(int j=0; j<nj; j++)
01520                     {
01521                         val2 = weights(j,(int)(*pval3));
01522                         // Input
01523                         *pval4++ += val2*(*pval1);
01524                         // Weights
01525                         if(val2 > 0)
01526                             gweights(j,(int)(*pval3)) += (*pval2)*(*pval1) + val;
01527                         else if(val2 < 0)
01528                             gweights(j,(int)(*pval3)) += (*pval2)*(*pval1) - val;
01529                         pval2++;
01530                     }
01531                     pval1++;
01532                     pval3++;
01533                 }
01534             }
01535         }
01536     }
01537     else if(input_is_sparse && !output_is_sparse)
01538     {
01539         ni = input.length();
01540         nj = goutput.length();
01541 
01542         if(fast_exact_is_equal(weight_decay, 0))
01543         {
01544             // Without weight decay
01545             if(ni != 0)
01546             {
01547                 pval3 = input.data();
01548                 for(int i=0; i<ni; i++)
01549                 {
01550                     pval1 = goutput.data();
01551                     pval2 = gweights[(int)(*pval3++)];
01552                     for(int j=0; j<nj;j++)
01553                         *pval2++ += *pval1++;
01554                 }
01555             }
01556         }
01557         else
01558         {
01559             // With weight decay
01560             if(penalty_type == "L2_square")
01561             {
01562                 if(ni != 0)
01563                 {
01564                     pval3 = input.data();                    
01565                     val = -two(learning_rate)*weight_decay;
01566                     for(int i=0; i<ni; i++)
01567                     {
01568                         pval1 = goutput.data();
01569                         pval2 = gweights[(int)(*pval3)];
01570                         pval4 = weights[(int)(*pval3++)];
01571                         for(int j=0; j<nj;j++)
01572                         {
01573                             *pval2++ += *pval1++ + val * (*pval4++);
01574                         }
01575                     }
01576                 }
01577             }
01578             else if(penalty_type == "L1")
01579             {
01580                 if(ni != 0)
01581                 {
01582                     pval3 = input.data();
01583                     val = -learning_rate*weight_decay;
01584                     for(int i=0; i<ni; i++)
01585                     {
01586                         pval1 = goutput.data();
01587                         pval2 = gweights[(int)(*pval3)];
01588                         pval4 = weights[(int)(*pval3++)];
01589                         for(int j=0; j<nj;j++)
01590                         {
01591                             if(*pval4 > 0)
01592                                 *pval2 += *pval1 + val;
01593                             else if(*pval4 < 0)
01594                                 *pval2 += *pval1 - val;
01595                             pval1++;
01596                             pval2++;
01597                             pval4++;
01598                         }
01599                     }
01600                 }
01601             }
01602         }
01603     }
01604     else if(input_is_sparse && output_is_sparse)
01605     {
01606         ni = input.length();
01607         nj = goutput.length();
01608 
01609         if(fast_exact_is_equal(weight_decay, 0))
01610         {
01611             // Without weight decay
01612             if(ni != 0)
01613             {
01614                 pval2 = input.data();
01615                 for(int i=0; i<ni; i++)
01616                 {
01617                     pval1 = goutput.data();
01618                     pval3 = output_indices.data();
01619                     for(int j=0; j<nj; j++)
01620                         gweights((int)(*pval2),(int)*pval3++) += *pval1++;
01621                     pval2++;
01622                 }
01623             }
01624         }
01625         else
01626         {
01627             // With weight decay
01628             if(penalty_type == "L2_square")
01629             {
01630                 if(ni != 0)
01631                 {
01632                     pval2 = input.data();
01633                     val = -two(learning_rate)*weight_decay;                    
01634                     for(int i=0; i<ni; i++)
01635                     {
01636                         pval1 = goutput.data();
01637                         pval3 = output_indices.data();
01638                         for(int j=0; j<nj; j++)
01639                         {
01640                             gweights((int)(*pval2),(int)*pval3) 
01641                                 += *pval1++ 
01642                                 + val * weights((int)(*pval2),(int)*pval3);
01643                             pval3++;
01644                         }
01645                         pval2++;
01646                     }
01647                 }
01648             }
01649             else if(penalty_type == "L1")
01650             {
01651                 if(ni != 0)
01652                 {
01653                     pval2 = input.data();
01654                     val = -learning_rate*weight_decay;                    
01655                     for(int i=0; i<ni; i++)
01656                     {
01657                         pval1 = goutput.data();
01658                         pval3 = output_indices.data();
01659                         for(int j=0; j<nj; j++)
01660                         {
01661                             val2 = weights((int)(*pval2),(int)*pval3);
01662                             if(val2 > 0)
01663                                 gweights((int)(*pval2),(int)*pval3) 
01664                                     += *pval1 + val;
01665                             else if(val2 < 0)
01666                                 gweights((int)(*pval2),(int)*pval3) 
01667                                     += *pval1 - val;
01668                             pval1++;
01669                             pval3++;
01670                         }
01671                         pval2++;
01672                     }
01673                 }
01674             }
01675         }
01676     }
01677 
01678 //    gradient_penalty(input,weights,bias,gweights,gbias,input_is_sparse,output_is_sparse,
01679 //                     learning_rate,weight_decay,bias_decay,output_indices);
01680 }
01681 
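// Accumulates only the weight/bias decay part of the gradient (no data
// term), with the same conventions as gradient_affine_transform():
// -2*lr*decay*param for "L2_square" and -lr*decay*sign(param) for "L1".
// Note that its only call site, at the end of gradient_affine_transform(),
// is currently commented out.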
01682 void NeuralProbabilisticLanguageModel::gradient_penalty(
01683     Vec input, Mat weights, Vec bias, 
01684     Mat gweights, Vec gbias,
01685     bool input_is_sparse, bool output_is_sparse,
01686     real learning_rate,
01687     real weight_decay, real bias_decay,
01688     Vec output_indices)
01689 {
01690     // Bias
01691     if(!fast_exact_is_equal(bias_decay, 0) &&
01692        bias.length() != 0)
01693     {
01694         if(output_is_sparse)
01695         {
01696             pval1 = gbias.data();
01697             pval2 = bias.data();
01698             pval3 = output_indices.data();
01699             ni = output_indices.length();            
01700             if(penalty_type == "L2_square")
01701             {
01702                 val = -two(learning_rate)*bias_decay;
01703                 for(int i=0; i<ni; i++)
01704                 {
01705                     pval1[(int)*pval3] += val*(pval2[(int)*pval3]);
01706                     pval3++;
01707                 }
01708             }
01709             else if(penalty_type == "L1")
01710             {
01711                 val = -learning_rate*bias_decay;
01712                 for(int i=0; i<ni; i++)
01713                 {
01714                     val2 = pval2[(int)*pval3];
01715                     if(val2 > 0 )
01716                         pval1[(int)*pval3++] += val;
01717                     else if(val2 < 0)
01718                         pval1[(int)*pval3++] -= val;
01719                 }
01720             }
01721         }
01722         else
01723         {
01724             pval1 = gbias.data();
01725             pval2 = bias.data();
01726             ni = bias.length();
01727             if(penalty_type == "L2_square")
01728             {
01729                 val = -two(learning_rate)*bias_decay;
01730                 for(int i=0; i<ni; i++)
01731                     *pval1++ += val*(*pval2++);
01732             }
01733             else if(penalty_type == "L1")
01734             {
01735                 val = -learning_rate*bias_decay;
01736                 for(int i=0; i<ni; i++)
01737                 {
01738                     if(*pval2 > 0)
01739                         *pval1 += val;
01740                     else if(*pval2 < 0)
01741                         *pval1 -= val;
01742                     pval1++;
01743                     pval2++;
01744                 }
01745             }
01746         }
01747     }
01748 
01749     // Weights
01750     if(!fast_exact_is_equal(weight_decay, 0))
01751     {
01752         if(!input_is_sparse && !output_is_sparse)
01753         {      
01754             if(penalty_type == "L2_square")
01755             {
01756                 multiplyAcc(gweights, weights,-two(learning_rate)*weight_decay);
01757             }
01758             else if(penalty_type == "L1")
01759             {
01760                 val = -learning_rate*weight_decay;
01761                 if(gweights.isCompact() && weights.isCompact())
01762                 {
01763                     Mat::compact_iterator itm = gweights.compact_begin();
01764                     Mat::compact_iterator itmend = gweights.compact_end();
01765                     Mat::compact_iterator itx = weights.compact_begin();
01766                     for(; itm!=itmend; ++itm, ++itx)
01767                     {
01768                         if(*itx > 0)
01769                             *itm += val;
01770                         else if(*itx < 0)
01771                             *itm -= val;
01772                     }
01773                 }
01774                 else // use non-compact iterators
01775                 {
01776                     Mat::iterator itm = gweights.begin();
01777                     Mat::iterator itmend = gweights.end();
01778                     Mat::iterator itx = weights.begin();
01779                     for(; itm!=itmend; ++itm, ++itx)
01780                     {
01781                         if(*itx > 0)
01782                             *itm += val;
01783                         else if(*itx < 0)
01784                             *itm -= val;
01785                     }
01786                 }
01787             }
01788         }
01789         else if(!input_is_sparse && output_is_sparse)
01790         {
01791             ni = output_indices.length();
01792             nj = input.length();
01793             pval1 = output_indices.data();
01794 
01795             if(penalty_type == "L2_square")
01796             {
01797                 val = -two(learning_rate)*weight_decay;
01798                 for(int i=0; i<ni; i++)
01799                 {
01800                     for(int j=0; j<nj; j++)
01801                     {
01802                         gweights(j,(int)(*pval1)) += val * 
01803                             weights(j,(int)(*pval1));
01804                     }
01805                     pval1++;
01806                 }
01807             }
01808             else if(penalty_type == "L1")
01809             {
01810                 val = -learning_rate*weight_decay;
01811                 for(int i=0; i<ni; i++)
01812                 {
01813                     for(int j=0; j<nj; j++)
01814                     {
01815                         val2 = weights(j,(int)(*pval1));
01816                         if(val2 > 0)
01817                             gweights(j,(int)(*pval1)) +=  val;
01818                         else if(val2 < 0)
01819                             gweights(j,(int)(*pval1)) -=  val;
01820                     }
01821                     pval1++;
01822                 }
01823             }
01824         }
01825         else if(input_is_sparse && !output_is_sparse)
01826         {
01827             ni = input.length();
01828             nj = output_indices.length();
01829             if(ni != 0)
01830             {
01831                 pval3 = input.data();
01832                 if(penalty_type == "L2_square")
01833                 {
01834                     val = -two(learning_rate)*weight_decay;
01835                     for(int i=0; i<ni; i++)
01836                     {
01837                         pval1 = weights[(int)(*pval3)];
01838                         pval2 = gweights[(int)(*pval3++)];
01839                         for(int j=0; j<nj;j++)
01840                             *pval2++ += val * *pval1++;
01841                     }
01842                 }
01843                 else if(penalty_type == "L1")
01844                 {
01845                     val = -learning_rate*weight_decay;
01846                     for(int i=0; i<ni; i++)
01847                     {
01848                         pval1 = weights[(int)(*pval3)];
01849                         pval2 = gweights[(int)(*pval3++)];
01850                         for(int j=0; j<nj;j++)
01851                         {
01852                             if(*pval1 > 0)
01853                                 *pval2 += val;
01854                             else if(*pval1 < 0)
01855                                 *pval2 -= val;
01856                             pval2++;
01857                             pval1++;
01858                         }
01859                     }                
01860                 }
01861             }
01862         }
01863         else if(input_is_sparse && output_is_sparse)
01864         {
01865             ni = input.length();
01866             nj = output_indices.length();
01867             if(ni != 0)
01868             {
01869                 pval1 = input.data();
01870                 if(penalty_type == "L2_square")
01871                 {
01872                     val = -two(learning_rate)*weight_decay;
01873                     for(int i=0; i<ni; i++)
01874                     {
01875                         pval2 = output_indices.data();
01876                         for(int j=0; j<nj; j++)
01877                         {
01878                             gweights((int)(*pval1),(int)*pval2) += val*
01879                                 weights((int)(*pval1),(int)*pval2);
01880                         pval2++;
01881                         }
01882                         pval1++;
01883                     }
01884                 }
01885                 else if(penalty_type == "L1")
01886                 {
01887                     val = -learning_rate*weight_decay;
01888                     for(int i=0; i<ni; i++)
01889                     {
01890                         pval2 = output_indices.data();
01891                         for(int j=0; j<nj; j++)
01892                         {
01893                             val2 = weights((int)(*pval1),(int)*pval2);
01894                             if(val2 > 0)
01895                                 gweights((int)(*pval1),(int)*pval2) += val;
01896                             else if(val2 < 0)
01897                                 gweights((int)(*pval1),(int)*pval2) -= val;
01898                             pval2++;
01899                         }
01900                         pval1++;
01901                     }
01902                     
01903                 }
01904             }
01905         }
01906     }
01907 }
01908 
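// Importance-sampling approximation of the log-likelihood gradient, in the
// spirit of Bengio and Sénécal (AISTATS 2003). A sketch of the estimator
// implemented below: writing o(y) for the negative energy (unnormalized
// score) of word y and q for proposal_distribution, the gradient of the
// normalization term, E_P[ d o(y)/d theta ] with P(y) = exp(o(y)) / Z, is
// estimated from samples y_1..y_n drawn from q with weights
//     w_k = exp(o(y_k)) / q(y_k),
// each sample contributing -(w_k / sum_k' w_k') * d o(y_k)/d theta, while
// the observed target contributes + d o(target)/d theta as usual.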
01909 void NeuralProbabilisticLanguageModel::importance_sampling_gradient_update(
01910     Vec& inputv, Vec& targetv, 
01911     real learning_rate, int n_samples, 
01912     real train_sample_weight=1)
01913 {
01914     // TODO: implement NGramDistribution::generate()
01915     //       adjust deepcopy(...)
01916 
01917     // Do forward propagation that is common to all computations
01918     fpropBeforeOutputWeights(inputv);
01919 
01920     // Generate the n_samples samples from proposal_distribution
01921     generated_samples.resize(n_samples+1);
01922     densities.resize(n_samples);
01923     
01924     proposal_distribution->setPredictor(inputv);
01925     pval1 = generated_samples.data();
01926     pval2 = sample.data();
01927     pval3 = densities.data();
01928     for(int i=0; i<n_samples; i++)
01929     {
01930         proposal_distribution->generate(sample);        
01931         *pval1++ = *pval2;
01932         *pval3++ = proposal_distribution->density(sample);        
01933     }
01934 
01935     real sum = 0;
01936     generated_samples[n_samples] = targetv[0];
01937     neg_energies.resize(n_samples+1);
01938     getNegativeEnergyValues(generated_samples, neg_energies);
01939     
01940     importance_sampling_ratios.resize(
01941         importance_sampling_ratios.length() + n_samples);
01942     pval1 = importance_sampling_ratios.subVec(
01943         importance_sampling_ratios.length() - n_samples, n_samples).data();
01944     pval2 = neg_energies.data();
01945     pval3 = densities.data();
01946     for(int i=0; i<n_samples; i++)
01947     {
01948         *pval1 = exp(*pval2++) / (*pval3++);
01949         sum += *pval1++;
01950     }
01951 
01952     // Compute importance sampling estimate of the gradient
01953 
01954     // Training sample contribution...
01955     gradient_last_layer.resize(1);
01956     gradient_last_layer[0] = learning_rate*train_sample_weight;
01957 
01958     if(nhidden2 > 0) {
01959         gradient_affine_transform(hidden2v, wout, bout, gradient_hidden2v, 
01960                                   gradient_wout, gradient_bout, 
01961                                   gradient_last_layer,
01962                                   false, true, learning_rate*train_sample_weight, 
01963                                   weight_decay+output_layer_weight_decay,
01964                                   bias_decay+output_layer_bias_decay,
01965                                   generated_samples.subVec(n_samples,1));
01966     }
01967     else if(nhidden > 0) 
01968     {
01969         gradient_affine_transform(hiddenv, wout, bout, gradient_hiddenv,
01970                                   gradient_wout, gradient_bout, 
01971                                   gradient_last_layer,
01972                                   false, true, learning_rate*train_sample_weight, 
01973                                   weight_decay+output_layer_weight_decay,
01974                                   bias_decay+output_layer_bias_decay, 
01975                                   generated_samples.subVec(n_samples,1));
01976     }
01977     else
01978     {
01979         gradient_affine_transform(nnet_input, wout, bout, gradient_nnet_input, 
01980                                   gradient_wout, gradient_bout, 
01981                                   gradient_last_layer,
01982                                   (dist_rep_dim <= 0), true, 
01983                                   learning_rate*train_sample_weight, 
01984                                   weight_decay+output_layer_weight_decay,
01985                                   bias_decay+output_layer_bias_decay, 
01986                                   generated_samples.subVec(n_samples,1));
01987     }
01988 
01989 
01990     if(nhidden>0 && direct_in_to_out)
01991     {
01992         gradient_affine_transform(nnet_input, direct_wout, direct_bout,
01993                                   gradient_nnet_input, 
01994                                   gradient_direct_wout, gradient_direct_bout,
01995                                   gradient_last_layer,
01996                                   dist_rep_dim<=0, true,
01997                                   learning_rate*train_sample_weight, 
01998                                   weight_decay+direct_in_to_out_weight_decay,
01999                                   0,
02000                                   generated_samples.subVec(n_samples,1));
02001     }
02002 
02003     // Importance sampling contributions
02004     for(int i=0; i<n_samples; i++)
02005     {
02006         gradient_last_layer.resize(1);
02007         gradient_last_layer[0] = -learning_rate*train_sample_weight*
02008             importance_sampling_ratios[i]/sum;
02009 
02010         if(nhidden2 > 0) {
02011             gradient_affine_transform(hidden2v, wout, bout, gradient_hidden2v, 
02012                                       gradient_wout, gradient_bout, 
02013                                       gradient_last_layer,
02014                                       false, true, 
02015                                       learning_rate*train_sample_weight, 
02016                                       weight_decay+output_layer_weight_decay,
02017                                       bias_decay+output_layer_bias_decay,
02018                                       generated_samples.subVec(i,1));
02019         }
02020         else if(nhidden > 0) 
02021         {
02022             gradient_affine_transform(hiddenv, wout, bout, gradient_hiddenv,
02023                                       gradient_wout, gradient_bout, 
02024                                       gradient_last_layer,
02025                                       false, true, 
02026                                       learning_rate*train_sample_weight, 
02027                                       weight_decay+output_layer_weight_decay,
02028                                       bias_decay+output_layer_bias_decay, 
02029                                       generated_samples.subVec(i,1));
02030         }
02031         else
02032         {
02033             gradient_affine_transform(nnet_input, wout, bout, 
02034                                       gradient_nnet_input, 
02035                                       gradient_wout, gradient_bout, 
02036                                       gradient_last_layer,
02037                                       (dist_rep_dim <= 0), true, 
02038                                       learning_rate*train_sample_weight, 
02039                                       weight_decay+output_layer_weight_decay,
02040                                       bias_decay+output_layer_bias_decay, 
02041                                       generated_samples.subVec(i,1));
02042         }
02043 
02044 
02045         if(nhidden>0 && direct_in_to_out)
02046         {
02047             gradient_affine_transform(nnet_input, direct_wout, direct_bout,
02048                                       gradient_nnet_input, 
02049                                       gradient_direct_wout, gradient_direct_bout,
02050                                       gradient_last_layer,
02051                                       dist_rep_dim<=0, true,
02052                                       learning_rate*train_sample_weight, 
02053                                       weight_decay+direct_in_to_out_weight_decay,
02054                                       0,
02055                                       generated_samples.subVec(i,1));
02056         }
02057 
02058     }
02059 
02060     // Propagate all contributions through rest of the network
02061 
02062     if(nhidden2 > 0)
02063     {
02064         gradient_transfer_func(hidden2v,gradient_act_hidden2v,gradient_hidden2v);
02065         gradient_affine_transform(hiddenv, w2, b2, gradient_hiddenv, 
02066                                   gradient_w2, gradient_b2, gradient_act_hidden2v,
02067                                   false, false,learning_rate*train_sample_weight, 
02068                                   weight_decay+layer2_weight_decay,
02069                                   bias_decay+layer2_bias_decay);
02070     }
02071     if(nhidden > 0)
02072     {
02073         gradient_transfer_func(hiddenv,gradient_act_hiddenv,gradient_hiddenv);  
02074         gradient_affine_transform(nnet_input, w1, b1, gradient_nnet_input, 
02075                                   gradient_w1, gradient_b1, gradient_act_hiddenv,
02076                                   dist_rep_dim<=0, false,learning_rate*train_sample_weight, 
02077                                   weight_decay+layer1_weight_decay,
02078                                   bias_decay+layer1_bias_decay);
02079     }
02080 
02081     if(dist_rep_dim > 0)
02082     {
02083         nfeats = 0;
02084         id = 0;
02085         for(int i=0; i<inputsize_; )
02086         {
02087             ifeats = 0;
02088             for(int j=0; j<n_feat_sets; j++,i++)
02089                 ifeats += feats[i].length();
02090             gradient_affine_transform(feat_input.subVec(nfeats,ifeats),
02091                                       wout_dist_rep, bout_dist_rep,
02092                                       gradient_feat_input, // Unused anyway
02093                                       gradient_wout_dist_rep,
02094                                       gradient_bout_dist_rep,
02095                                       gradient_nnet_input.subVec(
02096                                           id*dist_rep_dim,dist_rep_dim),
02097                                       true, false, 
02098                                       learning_rate*train_sample_weight, 
02099                                       weight_decay+
02100                                       output_layer_dist_rep_weight_decay,
02101                                       bias_decay
02102                                       +output_layer_dist_rep_bias_decay);
02103             nfeats += ifeats;
02104             id++;
02105         }
02106     }
02107     clearProppathGradient();
02108 
02109     // Update parameters and clear gradient
02110     if(!stochastic_gradient_descent_speedup)
02111         update();
02112 }
02113 
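// Fills neg_energies with the negative energy (unnormalized log-probability)
// of each candidate word index in `samples`, i.e. the output-layer score
// wout(.,y) . last_layer + bout[y], plus the direct input-to-output
// contribution when direct_in_to_out is used. These are the scores
// exponentiated by the importance-sampling ratios above.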
02114 void NeuralProbabilisticLanguageModel::getNegativeEnergyValues(
02115     Vec samples, Vec neg_energies)
02116 {
02117     if(dist_rep_dim > 0) // x -> d(x)
02118     {        
02119         // d(x),h1(d(x)),h2(h1(d(x))) -> o(x)
02120 
02121         add_affine_transform(last_layer,wout,bout,neg_energies,false,
02122                              true,samples);            
02123         if(direct_in_to_out && nhidden>0)
02124             add_affine_transform(nnet_input,direct_wout,direct_bout,
02125                                  neg_energies,false,true,
02126                                  samples);
02127     }
02128     else
02129     {
02130         // x, h1(x),h2(h1(x)) -> o(x)
02131         add_affine_transform(last_layer,wout,bout,neg_energies,nhidden<=0,
02132                              true,samples);
02133         if(direct_in_to_out && nhidden>0)
02134             add_affine_transform(feat_input,direct_wout,direct_bout,
02135                                  neg_energies,true,true,
02136                                  samples);
02137     }
02138 }
02139 
02140 void NeuralProbabilisticLanguageModel::compute_softmax(const Vec& x, 
02141                                                        const Vec& y) const
02142 {
02143     int n = x.length();
02144     
02145 //    real* yp = y.data();
02146 //    real* xp = x.data();
02147 //    for(int i=0; i<n; i++)
02148 //    {
02149 //        *yp++ = *xp > 1e-5 ? *xp : 1e-5;
02150 //        xp++;
02151 //    }
02152 
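    // Numerically stable softmax:
    //     y[i] = exp(x[i] - max(x)) / sum_j exp(x[j] - max(x)),
    // which equals softmax(x) but avoids overflow in the exponentials.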
02153     if (n>0)
02154     {
02155         real* yp = y.data();
02156         real* xp = x.data();
02157         real maxx = max(x);
02158         real s = 0;
02159         for (int i=0;i<n;i++)
02160             s += (*yp++ = safeexp(*xp++-maxx));
02161         if (s == 0) PLERROR("trying to divide by 0 in softmax");
02162         s = 1.0 / s;
02163         yp = y.data();
02164         for (int i=0;i<n;i++)
02165             *yp++ *= s;
02166     }
02167 }
02168 
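// Negative log-likelihood of the target class under the output distribution:
// nll(outputv, target) = -log(outputv[target]).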
02169 real NeuralProbabilisticLanguageModel::nll(const Vec& outputv, int target) const
02170 {
02171     return -safeflog(outputv[target]);
02172 }
02173     
02174 real NeuralProbabilisticLanguageModel::classification_loss(const Vec& outputv, 
02175                                                            int target) const
02176 {
02177     return (argmax(outputv) == target ? 0 : 1);
02178 }
02179 
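// Allocates all parameter and gradient buffers from the training set's
// dictionary and feature sets, then initializes them: weight matrices are
// filled by fillWeights() (random initialization), biases are cleared, and
// gradient buffers are zeroed. When fixed_output_weights is set, the output
// weights are instead drawn uniformly from {-1, +1}.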
02180 void NeuralProbabilisticLanguageModel::initializeParams(bool set_seed)
02181 {
02182     if (set_seed) {
02183         if (seed_>=0)
02184             rgen->manual_seed(seed_);
02185     }
02186 
02187 
02188     PP<Dictionary> dict = train_set->getDictionary(inputsize_);
02189     total_output_size = dict->size();
02190 
02191     total_feats_per_token = 0;
02192     for(int i=0; i<n_feat_sets; i++)
02193         total_feats_per_token += feat_sets[i]->size();
02194 
02195     int nnet_inputsize;
02196     if(dist_rep_dim > 0)
02197     {
02198         wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
02199         bout_dist_rep.resize(dist_rep_dim);
02200         nnet_inputsize = dist_rep_dim*inputsize_/n_feat_sets;
02201         nnet_input.resize(nnet_inputsize);
02202 
02203         fillWeights(wout_dist_rep);
02204         bout_dist_rep.clear();
02205 
02206         gradient_wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
02207         gradient_bout_dist_rep.resize(dist_rep_dim);
02208         gradient_nnet_input.resize(nnet_inputsize);
02209         gradient_wout_dist_rep.clear();
02210         gradient_bout_dist_rep.clear();
02211         gradient_nnet_input.clear();
02212     }
02213     else
02214     {
02215         nnet_inputsize = total_feats_per_token*inputsize_/n_feat_sets;
02216         nnet_input = feat_input;
02217     }
02218 
02219     if(nhidden>0) 
02220     {
02221         w1.resize(nnet_inputsize,nhidden);
02222         b1.resize(nhidden);
02223         hiddenv.resize(nhidden);
02224 
02225         fillWeights(w1);
02226         b1.clear();
02227 
02228         gradient_w1.resize(nnet_inputsize,nhidden);
02229         gradient_b1.resize(nhidden);
02230         gradient_hiddenv.resize(nhidden);
02231         gradient_act_hiddenv.resize(nhidden);
02232         gradient_w1.clear();
02233         gradient_b1.clear();
02234         gradient_hiddenv.clear();
02235         gradient_act_hiddenv.clear();
02236         if(nhidden2>0) 
02237         {
02238             w2.resize(nhidden,nhidden2);
02239             b2.resize(nhidden2);
02240             hidden2v.resize(nhidden2);
02241             wout.resize(nhidden2,total_output_size);
02242             bout.resize(total_output_size);
02243 
02244             fillWeights(w2);
02245             b2.clear();
02246 
02247             gradient_w2.resize(nhidden,nhidden2);
02248             gradient_b2.resize(nhidden2);
02249             gradient_hidden2v.resize(nhidden2);
02250             gradient_act_hidden2v.resize(nhidden2);
02251             gradient_wout.resize(nhidden2,total_output_size);
02252             gradient_bout.resize(total_output_size);
02253             gradient_w2.clear();
02254             gradient_b2.clear();
02255             gradient_hidden2v.clear();
02256             gradient_act_hidden2v.clear();
02257             gradient_wout.clear();
02258             gradient_bout.clear();
02259         }
02260         else
02261         {
02262             wout.resize(nhidden,total_output_size);
02263             bout.resize(total_output_size);
02264 
02265             gradient_wout.resize(nhidden,total_output_size);
02266             gradient_bout.resize(total_output_size);
02267             gradient_wout.clear();
02268             gradient_bout.clear();
02269         }
02270             
02271         if(direct_in_to_out)
02272         {
02273             direct_wout.resize(nnet_inputsize,total_output_size);
02274             direct_bout.resize(0); // Because it is not used
02275 
02276             fillWeights(direct_wout);
02277                 
02278             gradient_direct_wout.resize(nnet_inputsize,total_output_size);
02279             gradient_direct_wout.clear();
02280             gradient_direct_bout.resize(0); // idem
02281         }
02282     }
02283     else
02284     {
02285         wout.resize(nnet_inputsize,total_output_size);
02286         bout.resize(total_output_size);
02287 
02288         gradient_wout.resize(nnet_inputsize,total_output_size);
02289         gradient_bout.resize(total_output_size);
02290         gradient_wout.clear();
02291         gradient_bout.clear();
02292     }
02293 
02294     //fillWeights(wout);
02295     
02296     if (fixed_output_weights) {
02297         static Vec values;
02298         if (values.size()==0)
02299         {
02300             values.resize(2);
02301             values[0]=-1;
02302             values[1]=1;
02303         }
02304         rgen->fill_random_discrete(wout.toVec(), values);
02305     }
02306     else 
02307         fillWeights(wout);
02308 
02309     bout.clear();
02310 
02311     gradient_outputv.resize(total_output_size);
02312     gradient_act_outputv.resize(total_output_size);
02313     gradient_outputv.clear();
02314     gradient_act_outputv.clear();
02315 }
02316 
02317 /////////////////////////////////
02318 // makeDeepCopyFromShallowCopy //
02319 /////////////////////////////////
02320 void NeuralProbabilisticLanguageModel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
02321 {
02322     inherited::makeDeepCopyFromShallowCopy(copies);
02323 
02324     // Private variables
02325     deepCopyField(target_values,copies);
02326     deepCopyField(output_comp,copies);
02327     deepCopyField(row,copies);
02328     deepCopyField(last_layer,copies);
02329     deepCopyField(gradient_last_layer,copies);
02330     deepCopyField(feats,copies);
02331     deepCopyField(gradient,copies);
02332     deepCopyField(neg_energies,copies);
02333     deepCopyField(densities,copies);
02334 
02335     // Protected variables
02336     deepCopyField(feat_input,copies);
02337     deepCopyField(gradient_feat_input,copies);
02338     deepCopyField(nnet_input,copies);
02339     deepCopyField(gradient_nnet_input,copies);
02340     deepCopyField(hiddenv,copies);
02341     deepCopyField(gradient_hiddenv,copies);
02342     deepCopyField(gradient_act_hiddenv,copies);
02343     deepCopyField(hidden2v,copies);
02344     deepCopyField(gradient_hidden2v,copies);
02345     deepCopyField(gradient_act_hidden2v,copies);
02346     deepCopyField(gradient_outputv,copies);
02347     deepCopyField(gradient_act_outputv,copies);
02348     deepCopyField(rgen,copies);
02349     deepCopyField(feats_since_last_update,copies);
02350     deepCopyField(target_values_since_last_update,copies);
02351     deepCopyField(val_string_reference_set,copies);
02352     deepCopyField(target_values_reference_set,copies);
02353     deepCopyField(importance_sampling_ratios,copies);
02354     deepCopyField(sample,copies);
02355     deepCopyField(generated_samples,copies);
02356 
02357     // Public variables
02358     deepCopyField(w1,copies);
02359     deepCopyField(gradient_w1,copies);
02360     deepCopyField(b1,copies);
02361     deepCopyField(gradient_b1,copies);
02362     deepCopyField(w2,copies);
02363     deepCopyField(gradient_w2,copies);
02364     deepCopyField(b2,copies);
02365     deepCopyField(gradient_b2,copies);
02366     deepCopyField(wout,copies);
02367     deepCopyField(gradient_wout,copies);
02368     deepCopyField(bout,copies);
02369     deepCopyField(gradient_bout,copies);
02370     deepCopyField(direct_wout,copies);
02371     deepCopyField(gradient_direct_wout,copies);
02372     deepCopyField(direct_bout,copies);
02373     deepCopyField(gradient_direct_bout,copies);
02374     deepCopyField(wout_dist_rep,copies);
02375     deepCopyField(gradient_wout_dist_rep,copies);
02376     deepCopyField(bout_dist_rep,copies);
02377     deepCopyField(gradient_bout_dist_rep,copies);
02378 
02379     // Public build options
02380     deepCopyField(cost_funcs,copies);
02381     deepCopyField(feat_sets,copies);
02382     deepCopyField(proposal_distribution,copies);
02383 
02384     PLERROR("not up to date");
02385 }
02386 
02387 ////////////////
02388 // outputsize //
02389 ////////////////
02390 int NeuralProbabilisticLanguageModel::outputsize() const {
02391     return targetsize_;
02392 }
02393 
02394 ///////////
02395 // train //
02396 ///////////
02397 void NeuralProbabilisticLanguageModel::train()
02398 {
02399     //Profiler::activate();
02400     if(!train_set)
02401         PLERROR("In NeuralProbabilisticLanguageModel::train, "
02402                 "you did not setTrainingSet");
02403 
02404     if(!train_stats)
02405         PLERROR("In NeuralProbabilisticLanguageModel::train, "
02406                 "you did not setTrainStatsCollector");
02407  
02408     Vec outputv(total_output_size);
02409     Vec costsv(getTrainCostNames().length());
02410     Vec inputv(train_set->inputsize());
02411     Vec targetv(train_set->targetsize());
02412     real sample_weight = 1;
02413 
02414     int l = train_set->length();  
02415     int bs = batch_size>0 ? batch_size : l;
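    // Learning rate schedule used for every update below:
    //     lr_t = start_learning_rate / (bs * (1 + decrease_constant * total_updates)),
    // i.e. the usual 1/t decay of stochastic gradient descent, scaled by the
    // (mini)batch size.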
02416 
02417     // Importance sampling speedup variables
02418     
02419     // Effective sample size statistics
02420     real effective_sample_size_sum = 0;
02421     real effective_sample_size_square_sum = 0;
02422     real importance_sampling_ratio_k = 0;
02423     // Current true sample size;
02424     int n_samples = 0;
02425 
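    // Effective sample size of the importance-sampling weights w_k,
    //     ESS = (sum_k w_k)^2 / sum_k w_k^2,
    // accumulated incrementally below; sampling blocks are drawn until it
    // reaches minimum_effective_sample_size (or the number of samples reaches
    // total_output_size, in which case the exact gradient is computed).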
02426     real effective_sample_size = 0;
02427 
02428     PP<ProgressBar> pb;
02429     if(report_progress)
02430         pb = new ProgressBar("Training " + classname() + " from stage " 
02431                              + tostring(stage) + " to " 
02432                              + tostring(nstages), nstages-stage);
02433 
02434     //if(stage == 0)
02435     //{
02436     //    for(int t=0; t<l;t++)
02437     //    {
02438     //        cout << "t=" << t << " ";
02439     //        train_set->getExample(t,inputv,targetv,sample_weight);
02440     //        row.subVec(0,inputsize_) << inputv;
02441     //        train_set->getValues(row,inputsize_,target_values);
02442     //        if(target_values.length() != 1)
02443     //            verify_gradient(inputv,targetv,1e-6);
02444     //    }
02445     //    return;
02446     //}
02447 
02448     Mat old_gradient_wout;
02449     Vec old_gradient_bout;
02450     Mat old_gradient_wout_dist_rep;
02451     Vec old_gradient_bout_dist_rep;
02452     Mat old_gradient_w1;
02453     Vec old_gradient_b1;
02454     Mat old_gradient_w2;
02455     Vec old_gradient_b2;
02456     Mat old_gradient_direct_wout;
02457 
02458     if(stochastic_gradient_descent_speedup)
02459     {
02460         // Trick to make stochastic gradient descent faster
02461 
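        // The gradient matrices/vectors are pointed at the parameters
        // themselves (Mat/Vec assignment shares the underlying storage), so
        // the learning-rate-scaled gradient contributions computed during
        // bprop are applied to the parameters immediately and the separate
        // update() pass can be skipped.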
02462         old_gradient_wout = gradient_wout;
02463         old_gradient_bout = gradient_bout;
02464         gradient_wout = wout;
02465         gradient_bout = bout;
02466         
02467         if(dist_rep_dim > 0)
02468         {
02469             old_gradient_wout_dist_rep = gradient_wout_dist_rep;
02470             old_gradient_bout_dist_rep = gradient_bout_dist_rep;
02471             gradient_wout_dist_rep = wout_dist_rep;
02472             gradient_bout_dist_rep = bout_dist_rep;
02473         }
02474 
02475         if(nhidden>0) 
02476         {
02477             old_gradient_w1 = gradient_w1;
02478             old_gradient_b1 = gradient_b1;
02479             gradient_w1 = w1;
02480             gradient_b1 = b1;
02481             if(nhidden2>0) 
02482             {
02483                 old_gradient_w2 = gradient_w2;
02484                 old_gradient_b2 = gradient_b2;
02485                 gradient_w2 = w2;
02486                 gradient_b2 = b2;
02487             }
02488             
02489             if(direct_in_to_out)
02490             {
02491                 old_gradient_direct_wout = gradient_direct_wout;
02492                 gradient_direct_wout = direct_wout;
02493             }
02494         }
02495     }
02496 
02497     int initial_stage = stage;
02498     while(stage<nstages)
02499     {
02500         for(int t=0; t<l;)
02501         {
02502             //if(t%1000 == 0)
02503             //{
02504             //    cout << "Time: " << clock()/CLOCKS_PER_SEC << " seconds." << endl;
02505             //}
02506             for(int i=0; i<bs; i++)
02507             {
02514                 train_set->getExample(t%l,inputv,targetv,sample_weight);
02515 
02516                 if(proposal_distribution)
02517                 {
02518                     n_samples = 0;
02519                     importance_sampling_ratios.resize(0);
02520                     effective_sample_size_sum = 0;
02521                     effective_sample_size = effective_sample_size_square_sum = 0;
02522                     while(effective_sample_size < minimum_effective_sample_size)
02523                     {
02524                         if(n_samples >= total_output_size)
02525                         {
02526                             gradient_last_layer.resize(total_output_size);
02527                             
02528                             fprop(inputv,outputv,targetv,costsv,sample_weight);
02529                             bprop(inputv,outputv,targetv,costsv,
02530                                   start_learning_rate/
02531                                   (bs*(1.0+decrease_constant*total_updates)),
02532                                   sample_weight);
02533                             train_stats->update(costsv);
02534                             break;
02535                         }
02536                         
02537                         importance_sampling_gradient_update(
02538                             inputv,targetv,
02539                             start_learning_rate/
02540                             (bs*(1.0+decrease_constant*total_updates)),
02541                             sampling_block_size,
02542                             sample_weight
02543                             );
02544 
02545                         // Update effective sample size
02546                         pval1 = importance_sampling_ratios.subVec(
02547                             n_samples,sampling_block_size).data();
02548                         for(int k=0; k<sampling_block_size; k++)
02549                         {                            
02550                             effective_sample_size_sum += *pval1;
02551                             effective_sample_size_square_sum += *pval1 * (*pval1);
02552                             pval1++;
02553                         }
02554                         
02555                         effective_sample_size = 
02556                             (effective_sample_size_sum*effective_sample_size_sum)/
02557                             effective_sample_size_square_sum;
02558                         n_samples += sampling_block_size;
02559                     }
02560                 }
02561                 else
02562                 {
02563                     //Profiler::start("fprop()");
02564                     fprop(inputv,outputv,targetv,costsv,sample_weight);
02565                     //Profiler::end("fprop()");
02566                     //Profiler::start("bprop()");
02567                     bprop(inputv,outputv,targetv,costsv,
02568                           start_learning_rate/
02569                           (bs*(1.0+decrease_constant*total_updates)),
02570                           sample_weight);
02571                     //Profiler::end("bprop()");
02572                     train_stats->update(costsv);
02573                 }
02574                 t++;
02575             }
02576             // Update
02577             if(!stochastic_gradient_descent_speedup)
02578                 update();
02579             total_updates++;
02580         }
02581         train_stats->finalize();
02582         ++stage;
02583         if(verbosity>2)
02584             cout << "Epoch " << stage << " train objective: " 
02585                  << train_stats->getMean() << endl;
02586         if(pb) pb->update(stage-initial_stage);
02587     }
02588 
02589     if(stochastic_gradient_descent_speedup)
02590     {
02591         // Trick to make stochastic gradient descent faster
02592 
02593         gradient_wout = old_gradient_wout;
02594         gradient_bout = old_gradient_bout;
02595         
02596         if(dist_rep_dim > 0)
02597         {
02598             gradient_wout_dist_rep = old_gradient_wout_dist_rep;
02599             gradient_bout_dist_rep = old_gradient_bout_dist_rep;
02600         }
02601 
02602         if(nhidden>0) 
02603         {
02604             gradient_w1 = old_gradient_w1;
02605             gradient_b1 = old_gradient_b1;
02606             if(nhidden2>0) 
02607             {
02608                 gradient_w2 = old_gradient_w2;
02609                 gradient_b2 = old_gradient_b2;
02610             }
02611             
02612             if(direct_in_to_out)
02613             {
02614                 gradient_direct_wout = old_gradient_direct_wout;
02615             }
02616         }
02617     }
02618     //Profiler::report(cout);
02619 }
02620 
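// Finite-difference check of the analytic gradients: bprop() is run once
// (with a sentinel learning rate of -1) to accumulate the analytic
// gradients, then each parameter block is compared against a numerical
// estimate obtained by verify_gradient_affine_transform() with step size
// `step`; the comparisons are printed by output_gradient_verification().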
02621 void NeuralProbabilisticLanguageModel::verify_gradient(
02622     Vec& input, Vec targetv, real step)
02623 {
02624     Vec costsv(getTrainCostNames().length());
02625     real sampleweight = 1;
02626     real verify_step = step;
02627     
02628     // To avoid the interaction between fprop and this function
02629     int nfeats = 0;
02630     int id = 0;
02631     int ifeats = 0;
02632 
02633     Vec est_gradient_bout;
02634     Mat est_gradient_wout;
02635     Vec est_gradient_bout_dist_rep;
02636     Mat est_gradient_wout_dist_rep;
02637     Vec est_gradient_b1;
02638     Mat est_gradient_w1;
02639     Vec est_gradient_b2;
02640     Mat est_gradient_w2;
02641     Vec est_gradient_direct_bout;
02642     Mat est_gradient_direct_wout;
02643 
02644     int nnet_inputsize;
02645     if(dist_rep_dim > 0)
02646     {
02647         nnet_inputsize = dist_rep_dim*inputsize_/n_feat_sets;
02648         est_gradient_wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
02649         est_gradient_bout_dist_rep.resize(dist_rep_dim);
02650         est_gradient_wout_dist_rep.clear();
02651         est_gradient_bout_dist_rep.clear();
02652         gradient_wout_dist_rep.clear();
02653         gradient_bout_dist_rep.clear();
02654     }
02655     else
02656     {
02657         nnet_inputsize = total_feats_per_token*inputsize_/n_feat_sets;
02658     }
02659     
02660     if(nhidden>0) 
02661     {
02662         est_gradient_w1.resize(nnet_inputsize,nhidden);
02663         est_gradient_b1.resize(nhidden);
02664         est_gradient_w1.clear();
02665         est_gradient_b1.clear();
02666         gradient_w1.clear();
02667         gradient_b1.clear();
02668         if(nhidden2>0) 
02669         {
02670             est_gradient_w2.resize(nhidden,nhidden2);
02671             est_gradient_b2.resize(nhidden2);
02672             est_gradient_wout.resize(nhidden2,total_output_size);
02673             est_gradient_bout.resize(total_output_size);
02674             est_gradient_w2.clear();
02675             est_gradient_b2.clear();
02676             est_gradient_wout.clear();
02677             est_gradient_bout.clear();
02678             gradient_w2.clear();
02679             gradient_b2.clear();
02680             gradient_wout.clear();
02681             gradient_bout.clear();
02682         }
02683         else
02684         {
02685             est_gradient_wout.resize(nhidden,total_output_size);
02686             est_gradient_bout.resize(total_output_size);
02687             est_gradient_wout.clear();
02688             est_gradient_bout.clear();
02689             gradient_wout.clear();
02690             gradient_bout.clear();
02691         }
02692             
02693         if(direct_in_to_out)
02694         {
02695             est_gradient_direct_wout.resize(nnet_inputsize,total_output_size);
02696             est_gradient_direct_wout.clear();
02697             est_gradient_direct_bout.resize(0); // idem
02698             gradient_direct_wout.clear();                        
02699         }
02700     }
02701     else
02702     {
02703         est_gradient_wout.resize(nnet_inputsize,total_output_size);
02704         est_gradient_bout.resize(total_output_size);
02705         est_gradient_wout.clear();
02706         est_gradient_bout.clear();
02707         gradient_wout.clear();
02708         gradient_bout.clear();
02709     }
02710 
02711     fprop(input, output_comp, targetv, costsv);
02712     bprop(input,output_comp,targetv,costsv,
02713           -1, sampleweight);
02714     clearProppathGradient();
02715     
02716     // Compute estimated gradient
02717 
02718     if(dist_rep_dim > 0) 
02719     {        
02720         nfeats = 0;
02721         id = 0;
02722         for(int i=0; i<inputsize_;)
02723         {
02724             ifeats = 0;
02725             for(int j=0; j<n_feat_sets; j++,i++)
02726                 ifeats += feats[i].length();
02727             verify_gradient_affine_transform(
02728                 input,output_comp, targetv, costsv, sampleweight,
02729                 feat_input.subVec(nfeats,ifeats),
02730                 wout_dist_rep, bout_dist_rep,
02731                 est_gradient_wout_dist_rep, est_gradient_bout_dist_rep,
02732                 true, false, verify_step);
02733             nfeats += ifeats;
02734             id++;
02735         }
02736 
02737         cout << "Verify wout_dist_rep" << endl;
02738         output_gradient_verification(gradient_wout_dist_rep.toVec(), 
02739                                      est_gradient_wout_dist_rep.toVec());
02740         cout << "Verify bout_dist_rep" << endl;
02741         output_gradient_verification(gradient_bout_dist_rep, 
02742                                      est_gradient_bout_dist_rep);
02743         gradient_wout_dist_rep.clear();
02744         gradient_bout_dist_rep.clear();
02745 
02746         if(nhidden>0) 
02747         {
02748             verify_gradient_affine_transform(
02749                 input,output_comp, targetv, costsv, sampleweight,
02750                 nnet_input,w1,b1,
02751                 est_gradient_w1, est_gradient_b1, false,false, verify_step);
02752 
02753             cout << "Verify w1" << endl;
02754             output_gradient_verification(gradient_w1.toVec(), 
02755                                          est_gradient_w1.toVec());
02756             cout << "Verify b1" << endl;
02757             output_gradient_verification(gradient_b1, est_gradient_b1);
02758             
02759             if(nhidden2>0) 
02760             {
02761                 verify_gradient_affine_transform(
02762                     input,output_comp, targetv, costsv, sampleweight,    
02763                     hiddenv,w2,b2,
02764                     est_gradient_w2, est_gradient_b2,
02765                     false,false, verify_step);
02766                 cout << "Verify w2" << endl;
02767                 output_gradient_verification(gradient_w2.toVec(), 
02768                                              est_gradient_w2.toVec());
02769                 cout << "Verify b2" << endl;
02770                 output_gradient_verification(gradient_b2, est_gradient_b2);
02771 
02772                 last_layer = hidden2v;
02773             }
02774             else
02775                 last_layer = hiddenv;
02776         }
02777         else
02778             last_layer = nnet_input;
02779 
02780         verify_gradient_affine_transform(
02781             input,output_comp, targetv, costsv, sampleweight,
02782             last_layer,wout,bout,
02783             est_gradient_wout, est_gradient_bout, false,
02784             possible_targets_vary,verify_step,target_values);
02785 
02786         cout << "Verify wout" << endl;
02787         output_gradient_verification(gradient_wout.toVec(), 
02788                                      est_gradient_wout.toVec());
02789         cout << "Verify bout" << endl;
02790         output_gradient_verification(gradient_bout, est_gradient_bout);
02791  
02792         if(direct_in_to_out && nhidden>0)
02793         {
02794             verify_gradient_affine_transform(
02795                 input,output_comp, targetv, costsv, sampleweight,
02796                 nnet_input,direct_wout,direct_bout,
02797                 est_gradient_direct_wout, est_gradient_direct_bout,false,
02798                 possible_targets_vary, verify_step, target_values);
02799             cout << "Verify direct_wout" << endl;
02800             output_gradient_verification(gradient_direct_wout.toVec(), 
02801                                          est_gradient_direct_wout.toVec());
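            // No bias check for the direct connections here: direct_bout is
            // presumably kept empty (est_gradient_direct_bout is resized to 0
            // above), so the verification below is left commented out.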
02802             //cout << "Verify direct_bout" << endl;
02803             //output_gradient_verification(gradient_direct_bout, est_gradient_direct_bout);
02804         }
02805     }
02806     else
02807     {        
02808         if(nhidden>0)
02809         {
02810             verify_gradient_affine_transform(
02811                 input,output_comp, targetv, costsv, sampleweight,
02812                 feat_input,w1,b1,
02813                 est_gradient_w1, est_gradient_b1,
02814                 true,false, verify_step);
02815 
02816             cout << "Verify w1" << endl;
02817             output_gradient_verification(gradient_w1.toVec(), 
02818                                          est_gradient_w1.toVec());
02819             cout << "Verify b1" << endl;
02820             output_gradient_verification(gradient_b1, est_gradient_b1);
02821 
02822             if(nhidden2>0)
02823             {
02824                 verify_gradient_affine_transform(
02825                     input,output_comp, targetv, costsv, sampleweight,
02826                     hiddenv,w2,b2,
02827                     est_gradient_w2, est_gradient_b2,true,false,
02828                     verify_step);
02829 
02830                 cout << "Verify w2" << endl;
02831                 output_gradient_verification(gradient_w2.toVec(), 
02832                                              est_gradient_w2.toVec());
02833                 cout << "Verify b2" << endl;
02834                 output_gradient_verification(gradient_b2, est_gradient_b2);
02835                 
02836                 last_layer = hidden2v;
02837             }
02838             else
02839                 last_layer = hiddenv;
02840         }
02841         else
02842             last_layer = feat_input;
02843         
02844         verify_gradient_affine_transform(
02845             input,output_comp, targetv, costsv, sampleweight,
02846             last_layer,wout,bout,
02847             est_gradient_wout, est_gradient_bout, nhidden<=0,
02848             possible_targets_vary,verify_step, target_values);
02849 
02850         cout << "Verify wout" << endl;
02851         output_gradient_verification(gradient_wout.toVec(), 
02852                                      est_gradient_wout.toVec());
02853         cout << "Verify bout" << endl;
02854         output_gradient_verification(gradient_bout, est_gradient_bout);
02855         
02856         if(direct_in_to_out && nhidden>0)
02857         {
02858             verify_gradient_affine_transform(
02859                 input,output_comp, targetv, costsv, sampleweight,
02860                 feat_input,direct_wout,direct_bout,
02861                 est_gradient_direct_wout, est_gradient_direct_bout,true,
02862                 possible_targets_vary, verify_step,target_values);
02863             cout << "Verify direct_wout" << endl;
02864             output_gradient_verification(gradient_direct_wout.toVec(), 
02865                                          est_gradient_direct_wout.toVec());
02866             cout << "Verify direct_bout" << endl;
02867             output_gradient_verification(gradient_direct_bout, 
02868                                          est_gradient_direct_bout);
02869         }
02870     }
02871 
02872 }
02873 
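// Verifies, by central finite differences, the gradient of the cost with
// respect to the parameters of one affine transform of the network.
// 'weights' is indexed (input index, output index); when input_is_sparse,
// 'input' holds the indices of the active rows, and when output_is_sparse,
// 'output_indices' holds the indices of the candidate target columns, so
// only the corresponding entries are perturbed. The resulting estimates are
// written into est_gweights / est_gbias for comparison with the analytic
// gradients.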
02874 void NeuralProbabilisticLanguageModel::verify_gradient_affine_transform(
02875     Vec global_input, Vec& global_output, Vec& global_targetv,
02876     Vec& global_costs, real sampleweight,
02877     Vec input, Mat weights, Vec bias,
02878     Mat est_gweights, Vec est_gbias,  
02879     bool input_is_sparse, bool output_is_sparse,
02880     real step,
02881     Vec output_indices) const
02882 {
02883     real *pval1, *pval2, *pval3;
02884     int ni,nj;
02885     real out1,out2;
02886     // Bias
02887     if(bias.length() != 0)
02888     {
02889         if(output_is_sparse)
02890         {
02891             pval1 = est_gbias.data();
02892             pval2 = bias.data();
02893             pval3 = output_indices.data();
02894             ni = output_indices.length();
02895             for(int i=0; i<ni; i++)
02896             {
02897                 pval2[(int)*pval3] += step;
02898                 fprop(global_input, global_output, global_targetv, 
02899                       global_costs, sampleweight);
02900                 out1 = global_costs[0];
02901                 pval2[(int)*pval3] -= 2*step;
02902                 fprop(global_input, global_output, global_targetv, 
02903                       global_costs, sampleweight);
02904                 out2 = global_costs[0];
02905                 pval1[(int)*pval3] = (out1-out2)/(2*step);
02906                 pval2[(int)*pval3] += step;
02907                 pval3++;
02908             }
02909         }
02910         else
02911         {
02912             pval1 = est_gbias.data();
02913             pval2 = bias.data();
02914             ni = bias.length();
02915             for(int i=0; i<ni; i++)
02916             {
02917                 *pval2 += step;
02918                 fprop(global_input, global_output, global_targetv, 
02919                       global_costs, sampleweight);
02920                 out1 = global_costs[0];
02921                 *pval2 -= 2*step;
02922                 fprop(global_input, global_output, global_targetv, 
02923                       global_costs, sampleweight);
02924                 out2 = global_costs[0];
02925                 *pval1 = (out1-out2)/(2*step);
02926                 *pval2 += step;
02927                 pval1++; 
02928                 pval2++;
02929             }
02930         }
02931     }
02932 
02933     // Weights
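    // Four cases below, depending on which side of the transform is sparse:
    // the dense/dense case perturbs every weight, while a sparse side
    // restricts the loops to the rows given by 'input' and/or the columns
    // given by 'output_indices'.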
02934     if(!input_is_sparse && !output_is_sparse)
02935     {
02936         ni = weights.length();
02937         nj = weights.width();
02938         for(int i=0; i<ni; i++)
02939             for(int j=0; j<nj; j++)
02940             {
02941                 weights(i,j) += step;
02942                 fprop(global_input, global_output, global_targetv, 
02943                       global_costs, sampleweight);
02944                 out1 = global_costs[0];
02945                 weights(i,j) -= 2*step;
02946                 fprop(global_input, global_output, global_targetv, 
02947                       global_costs, sampleweight);
02948                 out2 = global_costs[0];
02949                 weights(i,j) += step;
02950                 est_gweights(i,j) = (out1-out2)/(2*step);
02951             }
02952     }
02953     else if(!input_is_sparse && output_is_sparse)
02954     {
02955         ni = output_indices.length();
02956         nj = input.length();
02957         pval3 = output_indices.data();
02958         for(int i=0; i<ni; i++)
02959         {
02960             for(int j=0; j<nj; j++)
02961             {
02962                 weights(j,(int)*pval3) += step;
02963                 fprop(global_input, global_output, global_targetv, 
02964                       global_costs, sampleweight);
02965                 out1 = global_costs[0];
02966                 weights(j,(int)*pval3) -= 2*step;
02967                 fprop(global_input, global_output, global_targetv, 
02968                       global_costs, sampleweight);
02969                 out2 = global_costs[0];
02970                 weights(j,(int)*pval3) += step;
02971                 est_gweights(j,(int)*pval3) = (out1-out2)/(2*step);
02972 //                if(target_values.length() != 1 && input[j] != 0 && (out1-out2)/(2*step) == 0)
02973 //                {                    
02974 //                    // debug: probe the cost with a unit perturbation of this weight
02975 //                    weights(j,(int)*pval3) += 1;
02976 //                    fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02977 //                    weights(j,(int)*pval3) -= 1;
02978 //                    cout << "out1 - global_costs[0] =" << out1-global_costs[0] << endl;
02979 //                }
02980             }
02981             pval3++;
02982         }
02983     }
02984     else if(input_is_sparse && !output_is_sparse)
02985     {
02986         ni = input.length();
02987         nj = weights.width();
02988         if(ni != 0 )
02989         {
02990             pval3 = input.data();
02991             for(int i=0; i<ni; i++)
02992             {
02993                 pval1 = est_gweights[(int)(*pval3)];
02994                 pval2 = weights[(int)(*pval3++)];
02995                 for(int j=0; j<nj;j++)
02996                 {
02997                     *pval2 += step;
02998                     fprop(global_input, global_output, global_targetv, 
02999                           global_costs, sampleweight);
03000                     out1 = global_costs[0];
03001                     *pval2 -= 2*step;
03002                     fprop(global_input, global_output, global_targetv, 
03003                           global_costs, sampleweight);
03004                     out2 = global_costs[0];
03005                     *pval1 = (out1-out2)/(2*step);
03006                     *pval2 += step;
03007                     pval1++;
03008                     pval2++;
03009                 }
03010             }
03011         }
03012     }
03013     else if(input_is_sparse && output_is_sparse)
03014     {
03015         // Weights
03016         ni = input.length();
03017         nj = output_indices.length();
03018         if(ni != 0)
03019         {
03020             pval2 = input.data();
03021             for(int i=0; i<ni; i++)
03022             {
03023                 pval3 = output_indices.data();
03024                 for(int j=0; j<nj; j++)
03025                 {
03026                     weights((int)(*pval2),(int)*pval3) += step;
03027                     fprop(global_input, global_output, global_targetv, 
03028                           global_costs, sampleweight);
03029                     out1 = global_costs[0];
03030                     weights((int)(*pval2),(int)*pval3) -= 2*step;
03031                     fprop(global_input, global_output, global_targetv, 
03032                           global_costs, sampleweight);
03033                     out2 = global_costs[0];
03034                     est_gweights((int)(*pval2),(int)*pval3)  = 
03035                         (out1-out2)/(2*step);
03036                     weights((int)(*pval2),(int)*pval3) += step;
03037                     pval3++;
03038                 }
03039                 pval2++;
03040             }
03041         }
03042     }
03043 }
03044 
03045 
03046 void NeuralProbabilisticLanguageModel::output_gradient_verification(
03047     Vec grad, Vec est_grad)
03048 {
03049     // Inspired from Func::verifyGradient()
03050 
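    // Per-coordinate relative difference |g - g_est| / (0.5 * |g + g_est|);
    // the largest one is reported below together with the offending coordinate.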
03051     Vec num = apply(grad - est_grad,(tRealFunc)FABS);
03052     Vec denom = real(0.5)*apply(grad + est_grad,(tRealFunc)FABS);
03053     for (int i = 0; i < num.length(); i++)
03054     {
03055         if (!fast_exact_is_equal(num[i], 0))
03056             num[i] /= denom[i];
03057         else
03058             if(!fast_exact_is_equal(denom[i],0))
03059                 cout << "at position " << i << " num[i] == 0 but denom[i] = " 
03060                      << denom[i] << endl;
03061     }
03062     int pos = argmax(num);
03063     cout << max(num) << " (at position " << pos << "/" << num.length()
03064          << ", computed = " << grad[pos] << " and estimated = "
03065          << est_grad[pos] << ")" << endl;
03066 
03067     real norm_grad = norm(grad);
03068     real norm_est_grad = norm(est_grad);
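    // Global agreement measure: cosine of the angle between the analytic and
    // the estimated gradient vectors (close to 1, i.e. angle close to 0, when
    // the two gradients match).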
03069     real cos_angle = fast_exact_is_equal(norm_grad*norm_est_grad,
03070                                          0)
03071         ? MISSING_VALUE
03072         : dot(grad,est_grad) /
03073         (norm_grad*norm_est_grad);
03074     if (cos_angle > 1)
03075         cos_angle = 1;      // Numerical imprecision can lead to such a situation.
03076     cout << "grad.length() = " << grad.length() << endl;
03077     cout << "cos(angle) : " << cos_angle << endl;
03078     cout << "angle : " << ( is_missing(cos_angle) ? MISSING_VALUE
03079                             : acos(cos_angle) ) << endl;
03080 }
03081 
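// The convenience methods below redirect val_string_reference_set (and, where
// appropriate, target_values_reference_set) to the dataset being processed,
// delegate to the inherited implementation, and restore the references to
// train_set afterwards.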
03082 void NeuralProbabilisticLanguageModel::batchComputeOutputAndConfidence(
03083     VMat inputs, real probability,
03084     VMat outputs_and_confidence) const
03085 {
03086     val_string_reference_set = inputs;
03087     inherited::batchComputeOutputAndConfidence(inputs,
03088                                                probability,
03089                                                outputs_and_confidence);
03090     val_string_reference_set = train_set;
03091 }
03092 
03093 void NeuralProbabilisticLanguageModel::use(VMat testset, VMat outputs) const
03094 {
03095     val_string_reference_set = testset;
03096     // Assumes it contains the target part information
03097     if(testset->width() > train_set->inputsize())
03098         target_values_reference_set = testset;
03099     inherited::use(testset,outputs);
03100     val_string_reference_set = train_set;
03101     if(testset->width() > train_set->inputsize())
03102         target_values_reference_set = train_set;
03103 }
03104 
03105 void NeuralProbabilisticLanguageModel::test(VMat testset, 
03106                                             PP<VecStatsCollector> test_stats, 
03107                                             VMat testoutputs, VMat testcosts) const
03108 {
03109     val_string_reference_set = testset;
03110     target_values_reference_set = testset;
03111     inherited::test(testset,test_stats,testoutputs,testcosts);
03112     val_string_reference_set = train_set;
03113     target_values_reference_set = train_set;
03114 }
03115 
03116 VMat NeuralProbabilisticLanguageModel::processDataSet(VMat dataset) const
03117 {
03118     VMat ret;
03119     val_string_reference_set = dataset;
03120     // Assumes it contains the target part information
03121     if(dataset->width() > train_set->inputsize())
03122         target_values_reference_set = dataset;
03123     ret = inherited::processDataSet(dataset);
03124     val_string_reference_set = train_set;
03125     if(dataset->width() > train_set->inputsize())
03126         target_values_reference_set = train_set;
03127     return ret;
03128 }
03129 
03130 } // end of namespace PLearn
03131 
03132 
03133 /*
03134   Local Variables:
03135   mode:c++
03136   c-basic-offset:4
03137   c-file-style:"stroustrup"
03138   c-file-offsets:((innamespace . 0)(inline-open . 0))
03139   indent-tabs-mode:nil
03140   fill-column:79
03141   End:
03142 */
03143 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :