// -*- C++ -*-

// FeatureSetNNet.cc
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


#include "FeatureSetNNet.h"
#include <plearn/vmat/SubVMatrix.h>
//#include <plearn/sys/Profiler.h>
#include <time.h>
#include <stdio.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(FeatureSetNNet, "Feedforward Neural Network for symbolic data represented using features",
                        "Inspired by the NNet class, FeatureSetNNet is simply an extension that deals with\n"
                        "feature representations of symbolic data. It can also learn distributed representations\n"
                        "for each symbolic input token. The possible targets are defined by the VMatrix's\n"
                        "getValues() function.\n");
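
// Overview of the computation (see fpropOutput() below): each of the
// inputsize() input fields is mapped by its FeatureSet to a set of active
// feature indices. These sparse features either feed the network directly,
// or are first mapped to a learned distributed representation of dimension
// dist_rep_dim (through wout_dist_rep/bout_dist_rep). Up to two hidden
// layers (w1/b1 and w2/b2, with hidden_transfer_func) and an output layer
// (wout/bout, with an optional output_transfer_func) follow, plus optional
// direct input-to-output connections (direct_wout/direct_bout).
//
// A minimal usage sketch in PLearn script syntax (hypothetical option
// values, shown for illustration only):
//
//     FeatureSetNNet(
//         nhidden = 100;
//         dist_rep_dim = 50;
//         output_transfer_func = "softmax";
//         cost_funcs = [ "NLL" ];
//         feat_sets = [ IdentityFeatureSet() ];
//     )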

FeatureSetNNet::FeatureSetNNet() // DEFAULT VALUES FOR ALL OPTIONS
    :
    rgen(new PRandom()),
    nhidden(0),
    nhidden2(0),
    weight_decay(0),
    bias_decay(0),
    layer1_weight_decay(0),
    layer1_bias_decay(0),
    layer2_weight_decay(0),
    layer2_bias_decay(0),
    output_layer_weight_decay(0),
    output_layer_bias_decay(0),
    direct_in_to_out_weight_decay(0),
    output_layer_dist_rep_weight_decay(0),
    output_layer_dist_rep_bias_decay(0),
    fixed_output_weights(0),
    direct_in_to_out(0),
    penalty_type("L2_square"),
    output_transfer_func(""),
    hidden_transfer_func("tanh"),
    start_learning_rate(0.01),
    decrease_constant(0),
    batch_size(1),
    stochastic_gradient_descent_speedup(true),
    initialization_method("uniform_linear"),
    dist_rep_dim(-1),
    possible_targets_vary(false)
{}

FeatureSetNNet::~FeatureSetNNet()
{
}

void FeatureSetNNet::declareOptions(OptionList& ol)
{
    declareOption(ol, "nhidden", &FeatureSetNNet::nhidden,
                  OptionBase::buildoption,
                  "Number of hidden units in first hidden layer (0 means no hidden layer).\n");

    declareOption(ol, "nhidden2", &FeatureSetNNet::nhidden2,
                  OptionBase::buildoption,
                  "Number of hidden units in second hidden layer (0 means no hidden layer).\n");

    declareOption(ol, "weight_decay", &FeatureSetNNet::weight_decay,
                  OptionBase::buildoption,
                  "Global weight decay for all layers.\n");

    declareOption(ol, "bias_decay", &FeatureSetNNet::bias_decay,
                  OptionBase::buildoption,
                  "Global bias decay for all layers.\n");

    declareOption(ol, "layer1_weight_decay", &FeatureSetNNet::layer1_weight_decay,
                  OptionBase::buildoption,
                  "Additional weight decay for the first hidden layer. Is added to 'weight_decay'.\n");

    declareOption(ol, "layer1_bias_decay", &FeatureSetNNet::layer1_bias_decay,
                  OptionBase::buildoption,
                  "Additional bias decay for the first hidden layer. Is added to 'bias_decay'.\n");

    declareOption(ol, "layer2_weight_decay", &FeatureSetNNet::layer2_weight_decay,
                  OptionBase::buildoption,
                  "Additional weight decay for the second hidden layer. Is added to 'weight_decay'.\n");

    declareOption(ol, "layer2_bias_decay", &FeatureSetNNet::layer2_bias_decay,
                  OptionBase::buildoption,
                  "Additional bias decay for the second hidden layer. Is added to 'bias_decay'.\n");

    declareOption(ol, "output_layer_weight_decay",
                  &FeatureSetNNet::output_layer_weight_decay,
                  OptionBase::buildoption,
                  "Additional weight decay for the output layer. Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_bias_decay",
                  &FeatureSetNNet::output_layer_bias_decay,
                  OptionBase::buildoption,
                  "Additional bias decay for the output layer. Is added to 'bias_decay'.\n");

    declareOption(ol, "direct_in_to_out_weight_decay",
                  &FeatureSetNNet::direct_in_to_out_weight_decay,
                  OptionBase::buildoption,
                  "Additional weight decay for the weights going from the input directly to the output layer. Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_dist_rep_weight_decay",
                  &FeatureSetNNet::output_layer_dist_rep_weight_decay,
                  OptionBase::buildoption,
                  "Additional weight decay for the output layer of the distributed representation\n"
                  "predictor. Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_dist_rep_bias_decay",
                  &FeatureSetNNet::output_layer_dist_rep_bias_decay,
                  OptionBase::buildoption,
                  "Additional bias decay for the output layer of the distributed representation\n"
                  "predictor. Is added to 'bias_decay'.\n");

    declareOption(ol, "fixed_output_weights",
                  &FeatureSetNNet::fixed_output_weights,
                  OptionBase::buildoption,
                  "If true then the output weights are not learned. They are initialized to +1 or -1 randomly.\n");

    declareOption(ol, "direct_in_to_out", &FeatureSetNNet::direct_in_to_out,
                  OptionBase::buildoption,
                  "If true then direct input to output weights will be added (if nhidden > 0).\n");

    declareOption(ol, "penalty_type", &FeatureSetNNet::penalty_type,
                  OptionBase::buildoption,
                  "Penalty to use on the weights (for weight and bias decay).\n"
                  "Can be any of:\n"
                  "  - \"L1\": L1 norm,\n"
                  "  - \"L2_square\" (default): square of the L2 norm.\n");

    declareOption(ol, "output_transfer_func",
                  &FeatureSetNNet::output_transfer_func,
                  OptionBase::buildoption,
                  "What transfer function to use for the output layer? One of: \n"
                  "  - \"tanh\" \n"
                  "  - \"sigmoid\" \n"
                  "  - \"softmax\" \n"
                  "An empty string or \"none\" means no output transfer function.\n");

    declareOption(ol, "hidden_transfer_func",
                  &FeatureSetNNet::hidden_transfer_func,
                  OptionBase::buildoption,
                  "What transfer function to use for hidden units? One of: \n"
                  "  - \"linear\" \n"
                  "  - \"tanh\" \n"
                  "  - \"sigmoid\" \n"
                  "  - \"softmax\" \n");

    declareOption(ol, "cost_funcs", &FeatureSetNNet::cost_funcs,
                  OptionBase::buildoption,
                  "A list of cost functions to use\n"
                  "in the form \"[ cf1; cf2; cf3; ... ]\" where each function is one of: \n"
                  "  - \"NLL\" (negative log likelihood -log(p[c]) for classification) \n"
                  "  - \"class_error\" (classification error) \n"
                  "The FIRST function of the list will be used as \n"
                  "the objective function to optimize \n"
                  "(possibly with an added weight decay penalty) \n");

    declareOption(ol, "start_learning_rate", &FeatureSetNNet::start_learning_rate,
                  OptionBase::buildoption,
                  "Start learning rate of gradient descent.\n");

    declareOption(ol, "decrease_constant", &FeatureSetNNet::decrease_constant,
                  OptionBase::buildoption,
                  "Decrease constant of gradient descent.\n");

    declareOption(ol, "batch_size", &FeatureSetNNet::batch_size,
                  OptionBase::buildoption,
                  "How many samples to use to estimate the average gradient before updating the weights.\n"
                  "0 is equivalent to specifying training_set->length().\n");

    declareOption(ol, "stochastic_gradient_descent_speedup", &FeatureSetNNet::stochastic_gradient_descent_speedup,
                  OptionBase::buildoption,
                  "Indication that a trick to speed up stochastic gradient descent\n"
                  "should be used.\n");

    declareOption(ol, "initialization_method",
                  &FeatureSetNNet::initialization_method, OptionBase::buildoption,
                  "The method used to initialize the weights:\n"
                  " - \"normal_linear\"  = a normal law with variance 1/n_inputs\n"
                  " - \"normal_sqrt\"    = a normal law with variance 1/sqrt(n_inputs)\n"
                  " - \"uniform_linear\" = a uniform law in [-1/n_inputs, 1/n_inputs]\n"
                  " - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(n_inputs), 1/sqrt(n_inputs)]\n"
                  " - \"zero\"           = all weights are set to 0\n");

    declareOption(ol, "dist_rep_dim", &FeatureSetNNet::dist_rep_dim,
                  OptionBase::buildoption,
                  "Dimensionality (number of components) of distributed representations.\n"
                  "If <= 0, then distributed representations will not be used.\n"
        );

    declareOption(ol, "possible_targets_vary",
                  &FeatureSetNNet::possible_targets_vary, OptionBase::buildoption,
                  "Indication that the set of possible targets varies from\n"
                  "one input vector to another.\n"
        );

    declareOption(ol, "feat_sets", &FeatureSetNNet::feat_sets,
                  OptionBase::buildoption,
                  "FeatureSets to apply to the input. The number of feature\n"
                  "sets should be a divisor of inputsize(). The feature\n"
                  "set applied to the i-th input field is the feature\n"
                  "set at position i % feat_sets.length().\n"
        );

    declareOption(ol, "train_set", &FeatureSetNNet::train_set,
                  OptionBase::learntoption,
                  "VMatrix used for training, which also provides information about the data (e.g. Dictionary objects for the different fields).\n");

    // The network's learnt parameters
    declareOption(ol, "w1", &FeatureSetNNet::w1, OptionBase::learntoption,
                  "Weights of first hidden layer.\n");
    declareOption(ol, "b1", &FeatureSetNNet::b1, OptionBase::learntoption,
                  "Bias of first hidden layer.\n");
    declareOption(ol, "w2", &FeatureSetNNet::w2, OptionBase::learntoption,
                  "Weights of second hidden layer.\n");
    declareOption(ol, "b2", &FeatureSetNNet::b2, OptionBase::learntoption,
                  "Bias of second hidden layer.\n");
    declareOption(ol, "wout", &FeatureSetNNet::wout, OptionBase::learntoption,
                  "Weights of output layer.\n");
    declareOption(ol, "bout", &FeatureSetNNet::bout, OptionBase::learntoption,
                  "Bias of output layer.\n");
    declareOption(ol, "direct_wout", &FeatureSetNNet::direct_wout,
                  OptionBase::learntoption,
                  "Direct input to output weights.\n");
    declareOption(ol, "direct_bout", &FeatureSetNNet::direct_bout,
                  OptionBase::learntoption,
                  "Direct input to output bias.\n");
    declareOption(ol, "wout_dist_rep", &FeatureSetNNet::wout_dist_rep,
                  OptionBase::learntoption,
                  "Weights of output layer for distributed representation predictor.\n");
    declareOption(ol, "bout_dist_rep", &FeatureSetNNet::bout_dist_rep,
                  OptionBase::learntoption,
                  "Bias of output layer for distributed representation predictor.\n");

    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void FeatureSetNNet::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void FeatureSetNNet::build_()
{
    // Don't do anything if we don't have a train_set:
    // it is the only one that knows the inputsize, targetsize and weightsize.

    if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
    {
        if(targetsize_ != 1)
            PLERROR("In FeatureSetNNet::build_(): targetsize_ must be 1, not %d",targetsize_);

        n_feat_sets = feat_sets.length();

        if(n_feat_sets == 0)
            PLERROR("In FeatureSetNNet::build_(): at least one FeatureSet must be provided\n");

        if(inputsize_ % n_feat_sets != 0)
            PLERROR("In FeatureSetNNet::build_(): feat_sets.length() must be a divisor of inputsize()");

        // Process penalty type option
        string pt = lowerstring( penalty_type );
        if( pt == "l1" )
            penalty_type = "L1";
        else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
            penalty_type = "L2_square";
        else if( pt == "l2" )
        {
            PLWARNING("In FeatureSetNNet::build_(): L2 penalty not supported, assuming you want L2 square");
            penalty_type = "L2_square";
        }
        else
            PLERROR("In FeatureSetNNet::build_(): penalty_type \"%s\" not supported", penalty_type.c_str());

        int ncosts = cost_funcs.size();
        if(ncosts<=0)
            PLERROR("In FeatureSetNNet::build_(): empty cost_funcs: must at least specify the cost function to optimize!");

        if(stage <= 0 ) // Training hasn't started
        {
            // Initialize parameters
            initializeParams();
        }

        output_comp.resize(total_output_size);
        row.resize(train_set->width());
        row.fill(MISSING_VALUE);
        feats.resize(inputsize_);
        // Make sure that all feats[i] have non-null storage...
        for(int i=0; i<feats.length(); i++)
        {
            feats[i].resize(1);
            feats[i].resize(0);
        }
        if(fixed_output_weights && stochastic_gradient_descent_speedup)
            PLERROR("In FeatureSetNNet::build_(): cannot use stochastic gradient descent speedup with fixed output weights");
        val_string_reference_set = train_set;
        target_values_reference_set = train_set;
    }
}

void FeatureSetNNet::fprop(const Vec& inputv, Vec& outputv, const Vec& targetv, Vec& costsv, real sampleweight) const
{
    fpropOutput(inputv,outputv);
    fpropCostsFromOutput(inputv, outputv, targetv, costsv, sampleweight);
}

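// Computes the output of the network for one input, without the costs.
// Each input field is converted back to its string value and passed to the
// corresponding FeatureSet; the resulting feature indices are shifted by a
// running offset so that all fields index into a single concatenated row
// space (the offset restarts at 0 after every group of n_feat_sets fields
// when a distributed representation is used, since all such groups then
// share wout_dist_rep).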
void FeatureSetNNet::fpropOutput(const Vec& inputv, Vec& outputv) const
{
    if (nhidden2>0 && nhidden<=0)
        PLERROR("FeatureSetNNet::fpropOutput(): can't have nhidden2 (=%d) > 0 while nhidden=0",nhidden2);

    // Get possible target values
    if(possible_targets_vary)
    {
        row.subVec(0,inputsize_) << inputv;
        target_values_reference_set->getValues(row,inputsize_,target_values);
        outputv.resize(target_values.length());
    }

    // Get features
    ni = inputsize_;
    nfeats = 0;
    for(int i=0; i<ni; i++)
    {
        str = val_string_reference_set->getValString(i,inputv[i]);
        feat_sets[i%n_feat_sets]->getFeatures(str,feats[i]);
        nfeats += feats[i].length();
    }

    feat_input.resize(nfeats);
    if(dist_rep_dim<=0) nnet_input = feat_input; // Keep sizes synchronized

    offset = 0;
    id = 0;
    for(int i=0; i<ni; i++)
    {
        f = feats[i].data();
        nj = feats[i].length();
        for(int j=0; j<nj; j++)
            feat_input[id++] = offset + *f++;
        if(dist_rep_dim <= 0 || ((i+1) % n_feat_sets != 0))
            offset += feat_sets[i % n_feat_sets]->size();
        else
            offset = 0;
    }

    // Fprop to output
    if(dist_rep_dim > 0) // x -> d(x)
    {
        nfeats = 0;
        id = 0;
        for(int i=0; i<inputsize_;)
        {
            ifeats = 0;
            for(int j=0; j<n_feat_sets; j++,i++)
                ifeats += feats[i].length();

            add_affine_transform(feat_input.subVec(nfeats,ifeats),
                                 wout_dist_rep, bout_dist_rep,
                                 nnet_input.subVec(id*dist_rep_dim,dist_rep_dim),
                                 true, false);
            nfeats += ifeats;
            id++;
        }

        if(nhidden>0) // d(x) -> h1(d(x))
        {
            add_affine_transform(nnet_input,w1,b1,hiddenv,false,false);
            add_transfer_func(hiddenv);

            if(nhidden2>0) // h1(d(x)) -> h2(h1(d(x)))
            {
                add_affine_transform(hiddenv,w2,b2,hidden2v,false,false);
                add_transfer_func(hidden2v);
                last_layer = hidden2v;
            }
            else
                last_layer = hiddenv;
        }
        else
            last_layer = nnet_input;

        // d(x),h1(d(x)),h2(h1(d(x))) -> o(x)
        add_affine_transform(last_layer,wout,bout,outputv,false,
                             possible_targets_vary,target_values);
        if(direct_in_to_out && nhidden>0)
            add_affine_transform(nnet_input,direct_wout,direct_bout,
                                 outputv,false,possible_targets_vary,target_values);
    }
    else
    {
        if(nhidden>0) // x -> h1(x)
        {
            add_affine_transform(feat_input,w1,b1,hiddenv,true,false);
            add_transfer_func(hiddenv);

            if(nhidden2>0) // h1(x) -> h2(h1(x))
            {
                add_affine_transform(hiddenv,w2,b2,hidden2v,false,false);
                add_transfer_func(hidden2v);
                last_layer = hidden2v;
            }
            else
                last_layer = hiddenv;
        }
        else
            last_layer = feat_input;
        // x, h1(x), h2(h1(x)) -> o(x)
        add_affine_transform(last_layer,wout,bout,outputv,nhidden<=0,
                             possible_targets_vary,target_values);
        if(direct_in_to_out && nhidden>0)
            add_affine_transform(feat_input,direct_wout,direct_bout,
                                 outputv,true,possible_targets_vary,target_values);
    }

    if(output_transfer_func!="" && output_transfer_func!="none")
        add_transfer_func(outputv, output_transfer_func);
}

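// Computes the costs given an already computed output. The target is first
// re-indexed: when possible_targets_vary, it becomes its position within
// target_values (as filled by fpropOutput()); otherwise it is used directly
// as a class index.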
void FeatureSetNNet::fpropCostsFromOutput(const Vec& inputv, const Vec& outputv, const Vec& targetv, Vec& costsv, real sampleweight) const
{
    if(possible_targets_vary)
    {
        reind_target = target_values.find(targetv[0]);
        if(reind_target<0)
            PLERROR("In FeatureSetNNet::fpropCostsFromOutput(): target %d is not in possible targets", (int)targetv[0]);
    }
    else
        reind_target = (int)targetv[0];

    // Compute cost functions
    int ncosts = cost_funcs.size();
    for(int k=0; k<ncosts; k++)
    {
        if(cost_funcs[k]=="NLL")
        {
            costsv[k] = sampleweight*nll(outputv,reind_target);
        }
        else if(cost_funcs[k]=="class_error")
            costsv[k] = sampleweight*classification_loss(outputv, reind_target);
        else
            PLERROR("In FeatureSetNNet::fpropCostsFromOutput(): unknown cost_func option: %s",cost_funcs[k].c_str());
    }
}

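// Backpropagates the cost gradient through the network and accumulates the
// gradients of all parameters. Note that gradient_outputv is seeded with a
// factor of learning_rate*sampleweight, so the accumulated parameter
// gradients are already scaled by the learning rate and update() only has
// to add them in. When stochastic_gradient_descent_speedup is set, the
// lists of features and candidate targets seen since the last update are
// not maintained (they are only needed by update()).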
void FeatureSetNNet::bprop(Vec& inputv, Vec& outputv, Vec& targetv, Vec& costsv, real learning_rate, real sampleweight)
{
    if(possible_targets_vary)
    {
        gradient_outputv.resize(target_values.length());
        gradient_act_outputv.resize(target_values.length());
        if(!stochastic_gradient_descent_speedup)
            target_values_since_last_update.append(target_values);
    }

    if(!stochastic_gradient_descent_speedup)
        feats_since_last_update.append(feat_input);

    // Gradient through cost
    if(cost_funcs[0]=="NLL")
    {
        // Helps avoid numerical precision errors
        if(output_transfer_func == "softmax")
            gradient_outputv[reind_target] = learning_rate*sampleweight;
        else
            gradient_outputv[reind_target] = learning_rate*sampleweight/(outputv[reind_target]);
    }
    else if(cost_funcs[0]=="class_error")
    {
        PLERROR("FeatureSetNNet::bprop(): gradient cannot be computed for \"class_error\" cost");
    }

    // Gradient through output transfer function
    if(output_transfer_func != "linear")
    {
        if(cost_funcs[0]=="NLL" && output_transfer_func == "softmax")
            gradient_transfer_func(outputv,gradient_act_outputv, gradient_outputv,
                                   output_transfer_func, reind_target);
        else
            gradient_transfer_func(outputv,gradient_act_outputv, gradient_outputv,
                                   output_transfer_func);
        gradient_last_layer = gradient_act_outputv;
    }
    else // identity transfer function: the gradient passes through unchanged
        gradient_last_layer = gradient_outputv;

    // Gradient through output affine transform
    if(nhidden2 > 0) {
        gradient_affine_transform(hidden2v, wout, bout, gradient_hidden2v,
                                  gradient_wout, gradient_bout, gradient_last_layer,
                                  false, possible_targets_vary, learning_rate,
                                  weight_decay+output_layer_weight_decay,
                                  bias_decay+output_layer_bias_decay,
                                  target_values);
    }
    else if(nhidden > 0)
    {
        gradient_affine_transform(hiddenv, wout, bout, gradient_hiddenv,
                                  gradient_wout, gradient_bout, gradient_last_layer,
                                  false, possible_targets_vary, learning_rate,
                                  weight_decay+output_layer_weight_decay,
                                  bias_decay+output_layer_bias_decay, target_values);
    }
    else
    {
        gradient_affine_transform(nnet_input, wout, bout, gradient_nnet_input,
                                  gradient_wout, gradient_bout, gradient_last_layer,
                                  (dist_rep_dim <= 0), possible_targets_vary, learning_rate,
                                  weight_decay+output_layer_weight_decay,
                                  bias_decay+output_layer_bias_decay, target_values);
    }

    if(nhidden2 > 0)
    {
        gradient_transfer_func(hidden2v,gradient_act_hidden2v,gradient_hidden2v);
        gradient_affine_transform(hiddenv, w2, b2, gradient_hiddenv,
                                  gradient_w2, gradient_b2, gradient_act_hidden2v,
                                  false, false, learning_rate,
                                  weight_decay+layer2_weight_decay,
                                  bias_decay+layer2_bias_decay);
    }
    if(nhidden > 0)
    {
        gradient_transfer_func(hiddenv,gradient_act_hiddenv,gradient_hiddenv);
        gradient_affine_transform(nnet_input, w1, b1, gradient_nnet_input,
                                  gradient_w1, gradient_b1, gradient_act_hiddenv,
                                  dist_rep_dim<=0, false, learning_rate,
                                  weight_decay+layer1_weight_decay,
                                  bias_decay+layer1_bias_decay);
    }

    if(nhidden>0 && direct_in_to_out)
    {
        gradient_affine_transform(nnet_input, direct_wout, direct_bout,
                                  gradient_nnet_input,
                                  gradient_direct_wout, gradient_direct_bout,
                                  gradient_last_layer,
                                  dist_rep_dim<=0, possible_targets_vary, learning_rate,
                                  weight_decay+direct_in_to_out_weight_decay,
                                  0,
                                  target_values);
    }

    if(dist_rep_dim > 0)
    {
        nfeats = 0;
        id = 0;
        for(int i=0; i<inputsize_; )
        {
            ifeats = 0;
            for(int j=0; j<n_feat_sets; j++,i++)
                ifeats += feats[i].length();
            gradient_affine_transform(feat_input.subVec(nfeats,ifeats),
                                      wout_dist_rep, bout_dist_rep,
                                      gradient_feat_input, // unused downstream
                                      gradient_wout_dist_rep,
                                      gradient_bout_dist_rep,
                                      gradient_nnet_input.subVec(id*dist_rep_dim,dist_rep_dim),
                                      true, false, learning_rate,
                                      weight_decay+output_layer_dist_rep_weight_decay,
                                      bias_decay+output_layer_dist_rep_bias_decay);
            nfeats += ifeats;
            id++;
        }
    }
    clearProppathGradient();
}

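// Applies the accumulated gradients to the parameters and resets them to
// zero (see update_affine_transform() below), using the recorded features
// and candidate targets to visit only the weights that were actually
// touched since the last update; the records are then cleared.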
void FeatureSetNNet::update()
{
    if(dist_rep_dim > 0)
    {
        update_affine_transform(feats_since_last_update, wout_dist_rep,
                                bout_dist_rep, gradient_wout_dist_rep,
                                gradient_bout_dist_rep, true, false,
                                target_values_since_last_update);
    }

    if(nhidden>0)
    {
        update_affine_transform(feats_since_last_update, w1, b1,
                                gradient_w1, gradient_b1,
                                dist_rep_dim<=0, false,
                                target_values_since_last_update);
        if(nhidden2>0)
        {
            update_affine_transform(feats_since_last_update, w2, b2,
                                    gradient_w2, gradient_b2,
                                    false, false,
                                    target_values_since_last_update);
        }

        update_affine_transform(feats_since_last_update, wout, bout,
                                gradient_wout, gradient_bout,
                                false, possible_targets_vary,
                                target_values_since_last_update);
        if(direct_in_to_out)
        {
            update_affine_transform(feats_since_last_update, direct_wout,
                                    direct_bout,
                                    gradient_direct_wout, gradient_direct_bout,
                                    false, possible_targets_vary,
                                    target_values_since_last_update);
        }
    }
    else
    {
        update_affine_transform(feats_since_last_update, wout, bout,
                                gradient_wout, gradient_bout,
                                dist_rep_dim<=0, possible_targets_vary,
                                target_values_since_last_update);
    }

    feats_since_last_update.resize(0);
    target_values_since_last_update.resize(0);
}

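// Folds the accumulated gradient of one affine layer into its parameters
// (gweights into weights, gbias into bias) and zeroes the gradient on the
// way. The sparse flags select which rows (active input features) and
// columns (candidate output indices) actually need to be visited.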
void FeatureSetNNet::update_affine_transform(
    Vec input, Mat weights, Vec bias,
    Mat gweights, Vec gbias,
    bool input_is_sparse, bool output_is_sparse,
    Vec output_indices)
{
    // Bias
    if(bias.length() != 0)
    {
        if(output_is_sparse)
        {
            pval1 = gbias.data();
            pval2 = bias.data();
            pval3 = output_indices.data();
            ni = output_indices.length();
            for(int i=0; i<ni; i++)
            {
                pval2[(int)*pval3] += pval1[(int)*pval3];
                pval1[(int)*pval3] = 0;
                pval3++;
            }
        }
        else
        {
            pval1 = gbias.data();
            pval2 = bias.data();
            ni = bias.length();
            for(int i=0; i<ni; i++)
            {
                *pval2 += *pval1;
                *pval1 = 0;
                pval1++;
                pval2++;
            }
        }
    }

    // Weights
    if(!input_is_sparse && !output_is_sparse)
    {
        if(!gweights.isCompact() || !weights.isCompact())
            PLERROR("In FeatureSetNNet::update_affine_transform(): weights or gweights is not a compact TMat");
        ni = weights.length();
        nj = weights.width();
        pval1 = gweights.data();
        pval2 = weights.data();
        for(int i=0; i<ni; i++)
            for(int j=0; j<nj; j++)
            {
                *pval2 += *pval1;
                *pval1 = 0;
                pval1++;
                pval2++;
            }
    }
    else if(!input_is_sparse && output_is_sparse)
    {
        ni = output_indices.length();
        nj = input.length();
        pval3 = output_indices.data();
        for(int i=0; i<ni; i++)
        {
            for(int j=0; j<nj; j++)
            {
                weights(j,(int)*pval3) += gweights(j,(int)*pval3);
                gweights(j,(int)*pval3) = 0;
            }
            pval3++;
        }
    }
    else if(input_is_sparse && !output_is_sparse)
    {
        ni = input.length();
        nj = weights.width();
        pval3 = input.data();
        for(int i=0; i<ni; i++)
        {
            pval1 = gweights[(int)(*pval3)];
            pval2 = weights[(int)(*pval3++)];
            for(int j=0; j<nj;j++)
            {
                *pval2 += *pval1;
                *pval1 = 0;
                pval1++;
                pval2++;
            }
        }
    }
    else if(input_is_sparse && output_is_sparse)
    {
        // Weights
        ni = input.length();
        nj = output_indices.length();
        pval2 = input.data();
        for(int i=0; i<ni; i++)
        {
            pval3 = output_indices.data();
            for(int j=0; j<nj; j++)
            {
                weights((int)(*pval2),(int)*pval3) += gweights((int)(*pval2),(int)*pval3);
                gweights((int)(*pval2),(int)*pval3) = 0;
                pval3++;
            }
            pval2++;
        }
    }
}

void FeatureSetNNet::clearProppathGradient()
{
    // Trick to make clearProppathGradient faster...
    if(cost_funcs[0]=="NLL")
        gradient_outputv[reind_target] = 0;
    else
        gradient_outputv.clear();
    gradient_act_outputv.clear();

    if(dist_rep_dim>0)
        gradient_nnet_input.clear();

    if(nhidden>0)
    {
        gradient_hiddenv.clear();
        gradient_act_hiddenv.clear();
        if(nhidden2>0)
        {
            gradient_hidden2v.clear();
            gradient_act_hidden2v.clear();
        }
    }
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void FeatureSetNNet::computeCostsFromOutputs(const Vec& inputv, const Vec& outputv,
                                             const Vec& targetv, Vec& costsv) const
{
    PLERROR("In FeatureSetNNet::computeCostsFromOutputs(): output is not enough to compute costs");
}

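// Variant of argmax() whose initial candidate is default_compare: when the
// maximum is attained at several indices, the initial candidate wins if it
// is one of them, so passing a random index (as computeOutput() does)
// avoids systematically returning the first maximum.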
int FeatureSetNNet::my_argmax(const Vec& vec, int default_compare) const
{
#ifdef BOUNDCHECK
    if(vec.length()==0)
        PLERROR("In FeatureSetNNet::my_argmax(): vec has zero length");
#endif
    real* v = vec.data();
    int indexmax = default_compare;
    real maxval = v[default_compare];
    for(int i=0; i<vec.length(); i++)
        if(v[i]>maxval)
        {
            maxval = v[i];
            indexmax = i;
        }
    return indexmax;
}

///////////////////
// computeOutput //
///////////////////
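// The predicted class is the one with the highest output score; ties are
// broken by handing my_argmax() a uniformly drawn initial candidate. When
// possible_targets_vary, the argmax is taken over the target_values filled
// in by fpropOutput().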
void FeatureSetNNet::computeOutput(const Vec& inputv, Vec& outputv) const
{
    fpropOutput(inputv, output_comp);
    if(possible_targets_vary)
        outputv[0] = target_values[my_argmax(output_comp,rgen->uniform_multinomial_sample(output_comp.length()))];
    else
        outputv[0] = argmax(output_comp);
}

///////////////////////////
// computeOutputAndCosts //
///////////////////////////
void FeatureSetNNet::computeOutputAndCosts(const Vec& inputv, const Vec& targetv,
                                           Vec& outputv, Vec& costsv) const
{
    fprop(inputv,output_comp,targetv,costsv);
    if(possible_targets_vary)
        outputv[0] = target_values[my_argmax(output_comp,rgen->uniform_multinomial_sample(output_comp.length()))];
    else
        outputv[0] = argmax(output_comp);
}

/////////////////
// fillWeights //
/////////////////
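// Initializes a weight matrix according to initialization_method. With n
// the matrix's length (its fan-in), the "*_linear" methods use a scale of
// 1/n and the "*_sqrt" methods a scale of 1/sqrt(n); "uniform_*" draws
// from U[-scale, scale] and "normal_*" from a zero-mean normal law with
// that scale. For example, a layer with 100 inputs under "uniform_linear"
// gets weights drawn from [-0.01, 0.01].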
void FeatureSetNNet::fillWeights(const Mat& weights)
{
    if (initialization_method == "zero") {
        weights.clear();
        return;
    }
    real delta;
    int is = weights.length();
    if (initialization_method.find("linear") != string::npos)
        delta = 1.0 / real(is);
    else
        delta = 1.0 / sqrt(real(is));
    if (initialization_method.find("normal") != string::npos)
        rgen->fill_random_normal(weights, 0, delta);
    else
        rgen->fill_random_uniform(weights, -delta, delta);
}

////////////
// forget //
////////////
void FeatureSetNNet::forget()
{
    if (train_set) build();
    total_updates=0;
    stage = 0;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> FeatureSetNNet::getTrainCostNames() const
{
    return cost_funcs;
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> FeatureSetNNet::getTestCostNames() const
{
    return cost_funcs;
}

///////////////////////
// add_transfer_func //
///////////////////////
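// Applies the requested transfer function in place on the given vector
// ("default" meaning hidden_transfer_func): tanh, sigmoid and softmax
// overwrite their input, while "linear" is a no-op.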
void FeatureSetNNet::add_transfer_func(const Vec& input, string transfer_func) const
{
    if (transfer_func == "default")
        transfer_func = hidden_transfer_func;
    if(transfer_func=="linear")
        return;
    else if(transfer_func=="tanh")
    {
        compute_tanh(input,input);
        return;
    }
    else if(transfer_func=="sigmoid")
    {
        compute_sigmoid(input,input);
        return;
    }
    else if(transfer_func=="softmax")
    {
        compute_softmax(input,input);
        return;
    }
    else PLERROR("In FeatureSetNNet::add_transfer_func(): Unknown value for transfer_func: %s",transfer_func.c_str());
}

////////////////////////////
// gradient_transfer_func //
////////////////////////////
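// Backpropagates a gradient through a transfer function: given the outputs
// o and the gradient w.r.t. them, accumulates into gradient_input the
// gradient w.r.t. the activations. The factors are 1 for "linear",
// (1 - o^2) for "tanh" and o(1 - o) for "sigmoid"; "softmax" uses the full
// Jacobian, except when nll_softmax_speed_up_target >= 0, where the
// simplified NLL-through-softmax gradient (delta_{i,target} - o_i) is used,
// which is both cheaper and numerically safer.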
void FeatureSetNNet::gradient_transfer_func(Vec& output, Vec& gradient_input, Vec& gradient_output, string transfer_func, int nll_softmax_speed_up_target) {
    if (transfer_func == "default")
        transfer_func = hidden_transfer_func;
    if(transfer_func=="linear")
    {
        pval1 = gradient_output.data();
        pval2 = gradient_input.data();
        ni = output.length();
        for(int i=0; i<ni; i++)
            *pval2++ += *pval1++;
        return;
    }
    else if(transfer_func=="tanh")
    {
        pval1 = gradient_output.data();
        pval2 = output.data();
        pval3 = gradient_input.data();
        ni = output.length();
        for(int i=0; i<ni; i++)
            *pval3++ += (*pval1++)*(1.0-square(*pval2++));
        return;
    }
    else if(transfer_func=="sigmoid")
    {
        pval1 = gradient_output.data();
        pval2 = output.data();
        pval3 = gradient_input.data();
        ni = output.length();
        for(int i=0; i<ni; i++)
        {
            *pval3++ += (*pval1++)*(*pval2)*(1.0-*pval2);
            pval2++;
        }
        return;
    }
    else if(transfer_func=="softmax")
    {
        if(nll_softmax_speed_up_target<0)
        {
            pval3 = gradient_input.data();
            ni = nk = output.length();
            for(int i=0; i<ni; i++)
            {
                val = output[i];
                pval1 = gradient_output.data();
                pval2 = output.data();
                for(int k=0; k<nk; k++)
                    if(k!=i)
                        *pval3 -= *pval1++ * val * (*pval2++);
                    else
                    {
                        *pval3 += *pval1++ * val * (1.0-val);
                        pval2++;
                    }
                pval3++;
            }
        }
        else // Permits speedup and avoids numerical precision errors
        {
            pval2 = output.data();
            pval3 = gradient_input.data();
            ni = output.length();
            grad = gradient_output[nll_softmax_speed_up_target];
            val = output[nll_softmax_speed_up_target];
            for(int i=0; i<ni; i++)
            {
                if(nll_softmax_speed_up_target!=i)
                    //*pval3++ -= grad * val * (*pval2++);
                    *pval3++ -= grad * (*pval2++);
                else
                {
                    //*pval3++ += grad * val * (1.0-val);
                    *pval3++ += grad * (1.0-val);
                    pval2++;
                }
            }
        }
        return;
    }
    else PLERROR("In FeatureSetNNet::gradient_transfer_func(): Unknown value for transfer_func: %s",transfer_func.c_str());
}

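// Computes output = bias + weights^T * input. A sparse input is given as a
// vector of active row indices (each contributing its weight row with an
// implicit coefficient of 1), and a sparse output restricts the computation
// to the columns listed in output_indices.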
void FeatureSetNNet::add_affine_transform(Vec input, Mat weights, Vec bias, Vec output,
                                          bool input_is_sparse, bool output_is_sparse,
                                          Vec output_indices) const
{
    // Bias
    if(bias.length() != 0)
    {
        if(output_is_sparse)
        {
            pval1 = output.data();
            pval2 = bias.data();
            pval3 = output_indices.data();
            ni = output.length();
            for(int i=0; i<ni; i++)
                *pval1++ = pval2[(int)*pval3++];
        }
        else
        {
            pval1 = output.data();
            pval2 = bias.data();
            ni = output.length();
            for(int i=0; i<ni; i++)
                *pval1++ = *pval2++;
        }
    }

    // Weights
    if(!input_is_sparse && !output_is_sparse)
    {
        transposeProductAcc(output,weights,input);
    }
    else if(!input_is_sparse && output_is_sparse)
    {
        ni = output.length();
        nj = input.length();
        pval1 = output.data();
        pval3 = output_indices.data();
        for(int i=0; i<ni; i++)
        {
            pval2 = input.data();
            for(int j=0; j<nj; j++)
                *pval1 += (*pval2++)*weights(j,(int)*pval3);
            pval1++;
            pval3++;
        }
    }
    else if(input_is_sparse && !output_is_sparse)
    {
        ni = input.length();
        nj = output.length();
        if(ni != 0)
        {
            pval3 = input.data();
            for(int i=0; i<ni; i++)
            {
                pval1 = output.data();
                pval2 = weights[(int)(*pval3++)];
                for(int j=0; j<nj;j++)
                    *pval1++ += *pval2++;
            }
        }
    }
    else if(input_is_sparse && output_is_sparse)
    {
        // Weights
        ni = input.length();
        nj = output.length();
        if(ni != 0)
        {
            pval2 = input.data();
            for(int i=0; i<ni; i++)
            {
                pval1 = output.data();
                pval3 = output_indices.data();
                for(int j=0; j<nj; j++)
                    *pval1++ += weights((int)(*pval2),(int)*pval3++);
                pval2++;
            }
        }
    }
}

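// Backpropagates through an affine transform: accumulates the gradients
// w.r.t. the input (ginput), weights (gweights) and bias (gbias) from the
// output gradient, handling the same four dense/sparse cases as
// add_affine_transform(). Weight and bias decay are folded directly into
// the accumulated gradients: -2*learning_rate*decay*w for "L2_square" and
// -learning_rate*decay*sign(w) for "L1" (the gradients are already scaled
// by the learning rate, see bprop()).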
void FeatureSetNNet::gradient_affine_transform(Vec input, Mat weights, Vec bias,
                                               Vec ginput, Mat gweights, Vec gbias,
                                               Vec goutput, bool input_is_sparse,
                                               bool output_is_sparse,
                                               real learning_rate,
                                               real weight_decay, real bias_decay,
                                               Vec output_indices)
{
    // Bias
    if(bias.length() != 0)
    {
        if(output_is_sparse)
        {
            pval1 = gbias.data();
            pval2 = goutput.data();
            pval3 = output_indices.data();
            ni = goutput.length();

            if(fast_exact_is_equal(bias_decay, 0))
            {
                // Without bias decay
                for(int i=0; i<ni; i++)
                    pval1[(int)*pval3++] += *pval2++;
            }
            else
            {
                // With bias decay
                if(penalty_type == "L2_square")
                {
                    pval4 = bias.data();
                    val = -two(learning_rate)*bias_decay;
                    for(int i=0; i<ni; i++)
                    {
                        pval1[(int)*pval3] += *pval2++ + val*(pval4[(int)*pval3]);
                        pval3++;
                    }
                }
                else if(penalty_type == "L1")
                {
                    pval4 = bias.data();
                    val = -learning_rate*bias_decay;
                    for(int i=0; i<ni; i++)
                    {
                        val2 = pval4[(int)*pval3];
                        if(val2 > 0 )
                            pval1[(int)*pval3] += *pval2 + val;
                        else if(val2 < 0)
                            pval1[(int)*pval3] += *pval2 - val;
                        pval2++;
                        pval3++;
                    }
                }
            }
        }
        else
        {
            pval1 = gbias.data();
            pval2 = goutput.data();
            ni = goutput.length();
            if(fast_exact_is_equal(bias_decay, 0))
            {
                // Without bias decay
                for(int i=0; i<ni; i++)
                    *pval1++ += *pval2++;
            }
            else
            {
                // With bias decay
                if(penalty_type == "L2_square")
                {
                    pval3 = bias.data();
                    val = -two(learning_rate)*bias_decay;
                    for(int i=0; i<ni; i++)
                    {
                        *pval1++ += *pval2++ + val * (*pval3++);
                    }
                }
                else if(penalty_type == "L1")
                {
                    pval3 = bias.data();
                    val = -learning_rate*bias_decay;
                    for(int i=0; i<ni; i++)
                    {
                        if(*pval3 > 0)
                            *pval1 += *pval2 + val;
                        else if(*pval3 < 0)
                            *pval1 += *pval2 - val;
                        pval1++;
                        pval2++;
                        pval3++;
                    }
                }
            }
        }
    }

    // Weights and input (when appropriate)
    if(!input_is_sparse && !output_is_sparse)
    {
        // Input
        //productAcc(ginput, weights, goutput);
        // Weights
        //externalProductAcc(gweights, input, goutput);

        // Faster code to do this, which limits the accesses
        // to memory

        ni = input.length();
        nj = goutput.length();
        pval3 = ginput.data();
        pval5 = input.data();

        if(fast_exact_is_equal(weight_decay, 0))
        {
            // Without weight decay
            for(int i=0; i<ni; i++) {
                pval1 = goutput.data();
                pval2 = weights[i];
                pval4 = gweights[i];
                for(int j=0; j<nj; j++) {
                    *pval3 += *pval2 * (*pval1);
                    *pval4 += *pval5 * (*pval1);
                    pval1++;
                    pval2++;
                    pval4++;
                }
                pval3++;
                pval5++;
            }
        }
        else
        {
            // With weight decay
            if(penalty_type == "L2_square")
            {
                val = -two(learning_rate)*weight_decay;
                for(int i=0; i<ni; i++) {
                    pval1 = goutput.data();
                    pval2 = weights[i];
                    pval4 = gweights[i];
                    for(int j=0; j<nj; j++) {
                        *pval3 += *pval2 * (*pval1);
                        *pval4 += *pval5 * (*pval1) + val * (*pval2);
                        pval1++;
                        pval2++;
                        pval4++;
                    }
                    pval3++;
                    pval5++;
                }
            }
            else if(penalty_type == "L1")
            {
                val = -learning_rate*weight_decay;
                for(int i=0; i<ni; i++) {
                    pval1 = goutput.data();
                    pval2 = weights[i];
                    pval4 = gweights[i];
                    for(int j=0; j<nj; j++) {
                        *pval3 += *pval2 * (*pval1);
                        if(*pval2 > 0)
                            *pval4 += *pval5 * (*pval1) + val;
                        else if(*pval2 < 0)
                            *pval4 += *pval5 * (*pval1) - val;
                        pval1++;
                        pval2++;
                        pval4++;
                    }
                    pval3++;
                    pval5++;
                }
            }
        }
    }
    else if(!input_is_sparse && output_is_sparse)
    {
        ni = goutput.length();
        nj = input.length();
        pval1 = goutput.data();
        pval3 = output_indices.data();

        if(fast_exact_is_equal(weight_decay, 0))
        {
            // Without weight decay
            for(int i=0; i<ni; i++)
            {
                pval2 = input.data();
                pval4 = ginput.data();
                for(int j=0; j<nj; j++)
                {
                    // Input
                    *pval4++ += weights(j,(int)(*pval3))*(*pval1);
                    // Weights
                    gweights(j,(int)(*pval3)) += (*pval2++)*(*pval1);
                }
                pval1++;
                pval3++;
            }
        }
        else
        {
            // With weight decay
            if(penalty_type == "L2_square")
            {
                val = -two(learning_rate)*weight_decay;
                for(int i=0; i<ni; i++)
                {
                    pval2 = input.data();
                    pval4 = ginput.data();
                    for(int j=0; j<nj; j++)
                    {
                        val2 = weights(j,(int)(*pval3));
                        // Input
                        *pval4++ += val2*(*pval1);
                        // Weights
                        gweights(j,(int)(*pval3)) += (*pval2++)*(*pval1) + val*val2;
                    }
                    pval1++;
                    pval3++;
                }
            }
            else if(penalty_type == "L1")
            {
                val = -learning_rate*weight_decay;
                for(int i=0; i<ni; i++)
                {
                    pval2 = input.data();
                    pval4 = ginput.data();
                    for(int j=0; j<nj; j++)
                    {
                        val2 = weights(j,(int)(*pval3));
                        // Input
                        *pval4++ += val2*(*pval1);
                        // Weights
                        if(val2 > 0)
                            gweights(j,(int)(*pval3)) += (*pval2)*(*pval1) + val;
                        else if(val2 < 0)
                            gweights(j,(int)(*pval3)) += (*pval2)*(*pval1) - val;
                        pval2++;
                    }
                    pval1++;
                    pval3++;
                }
            }
        }
    }
    else if(input_is_sparse && !output_is_sparse)
    {
        ni = input.length();
        nj = goutput.length();

        if(fast_exact_is_equal(weight_decay, 0))
        {
            // Without weight decay
            if(ni != 0)
            {
                pval3 = input.data();
                for(int i=0; i<ni; i++)
                {
                    pval1 = goutput.data();
                    pval2 = gweights[(int)(*pval3++)];
                    for(int j=0; j<nj;j++)
                        *pval2++ += *pval1++;
                }
            }
        }
        else
        {
            // With weight decay
            if(penalty_type == "L2_square")
            {
                if(ni != 0)
                {
                    pval3 = input.data();
                    val = -two(learning_rate)*weight_decay;
                    for(int i=0; i<ni; i++)
                    {
                        pval1 = goutput.data();
                        pval2 = gweights[(int)(*pval3)];
                        pval4 = weights[(int)(*pval3++)];
                        for(int j=0; j<nj;j++)
                        {
                            *pval2++ += *pval1++ + val * (*pval4++);
                        }
                    }
                }
            }
            else if(penalty_type == "L1")
            {
                if(ni != 0)
                {
                    pval3 = input.data();
01408                     val = -learning_rate*weight_decay; // negative, as in the other L1 branches
01409                     for(int i=0; i<ni; i++)
01410                     {
01411                         pval1 = goutput.data();
01412                         pval2 = gweights[(int)(*pval3)];
01413                         pval4 = weights[(int)(*pval3++)];
01414                         for(int j=0; j<nj;j++)
01415                         {
01416                             if(*pval4 > 0)
01417                                 *pval2 += *pval1 + val;
01418                             else if(*pval4 < 0)
01419                                 *pval2 += *pval1 - val;
01420                             pval1++;
01421                             pval2++;
01422                             pval4++;
01423                         }
01424                     }
01425                 }
01426             }
01427         }
01428     }
01429     else if(input_is_sparse && output_is_sparse)
01430     {
01431         ni = input.length();
01432         nj = goutput.length();
01433 
01434         if(fast_exact_is_equal(weight_decay, 0))
01435         {
01436             // Without weight decay
01437             if(ni != 0)
01438             {
01439                 pval2 = input.data();
01440                 for(int i=0; i<ni; i++)
01441                 {
01442                     pval1 = goutput.data();
01443                     pval3 = output_indices.data();
01444                     for(int j=0; j<nj; j++)
01445                         gweights((int)(*pval2),(int)*pval3++) += *pval1++;
01446                     pval2++;
01447                 }
01448             }
01449         }
01450         else
01451         {
01452             // With weight decay
01453             if(penalty_type == "L2_square")
01454             {
01455                 if(ni != 0)
01456                 {
01457                     pval2 = input.data();
01458                     val = -two(learning_rate)*weight_decay;                    
01459                     for(int i=0; i<ni; i++)
01460                     {
01461                         pval1 = goutput.data();
01462                         pval3 = output_indices.data();
01463                         for(int j=0; j<nj; j++)
01464                         {
01465                             gweights((int)(*pval2),(int)*pval3) 
01466                                 += *pval1++ 
01467                                 + val * weights((int)(*pval2),(int)*pval3);
01468                             pval3++;
01469                         }
01470                         pval2++;
01471                     }
01472                 }
01473             }
01474             else if(penalty_type == "L1")
01475             {
01476                 if(ni != 0)
01477                 {
01478                     pval2 = input.data();
01479                     val = -learning_rate*weight_decay;                    
01480                     for(int i=0; i<ni; i++)
01481                     {
01482                         pval1 = goutput.data();
01483                         pval3 = output_indices.data();
01484                         for(int j=0; j<nj; j++)
01485                         {
01486                             val2 = weights((int)(*pval2),(int)*pval3);
01487                             if(val2 > 0)
01488                                 gweights((int)(*pval2),(int)*pval3) 
01489                                     += *pval1 + val;
01490                             else if(val2 < 0)
01491                                 gweights((int)(*pval2),(int)*pval3) 
01492                                     += *pval1 - val;
01493                             pval1++;
01494                             pval3++;
01495                         }
01496                         pval2++;
01497                     }
01498                 }
01499             }
01500         }
01501     }
01502 
01503 //    gradient_penalty(input,weights,bias,gweights,gbias,input_is_sparse,output_is_sparse,
01504 //                     learning_rate,weight_decay,bias_decay,output_indices);
01505 }
01506 
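// Summary of the decay handling above (a sketch of the intended math, not a
// restatement of update()): for a weight w with data gradient g = dC/dw, the
// branches accumulate
//     L2_square: gw += g - 2*learning_rate*weight_decay * w
//     L1:        gw += g - learning_rate*weight_decay * sign(w)
// The decay term is pre-scaled by the learning rate while the data term g is
// not, so update() is presumably responsible for scaling the data part. Note
// also that the L1 branches skip the data term g as well whenever the weight
// is exactly 0.
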
01507 void FeatureSetNNet::gradient_penalty(Vec input, Mat weights, Vec bias, 
01508                                   Mat gweights, Vec gbias,
01509                                   bool input_is_sparse, bool output_is_sparse,
01510                                   real learning_rate,
01511                                   real weight_decay, real bias_decay,
01512                                   Vec output_indices)
01513 {
01514     // Bias
01515     if(!fast_exact_is_equal(bias_decay, 0) && !fast_exact_is_equal(bias.length(), 0) )
01516     {
01517         if(output_is_sparse)
01518         {
01519             pval1 = gbias.data();
01520             pval2 = bias.data();
01521             pval3 = output_indices.data();
01522             ni = output_indices.length();            
01523             if(penalty_type == "L2_square")
01524             {
01525                 val = -two(learning_rate)*bias_decay;
01526                 for(int i=0; i<ni; i++)
01527                 {
01528                     pval1[(int)*pval3] += val*(pval2[(int)*pval3]);
01529                     pval3++;
01530                 }
01531             }
01532             else if(penalty_type == "L1")
01533             {
01534                 val = -learning_rate*bias_decay;
01535                 for(int i=0; i<ni; i++)
01536                 {
01537                     val2 = pval2[(int)*pval3];
01538                     if(val2 > 0 )
01539                         pval1[(int)*pval3] += val;
01540                     else if(val2 < 0) pval1[(int)*pval3] -= val;
01541                     pval3++; // advance even when val2 == 0
01542                 }
01543             }
01544         }
01545         else
01546         {
01547             pval1 = gbias.data();
01548             pval2 = bias.data();
01549             ni = bias.length(); // dense output: every bias is penalized
01550             if(penalty_type == "L2_square")
01551             {
01552                 val = -two(learning_rate)*bias_decay;
01553                 for(int i=0; i<ni; i++)
01554                     *pval1++ += val*(*pval2++);
01555             }
01556             else if(penalty_type == "L1")
01557             {
01558                 val = -learning_rate*bias_decay;
01559                 for(int i=0; i<ni; i++)
01560                 {
01561                     if(*pval2 > 0)
01562                         *pval1 += val;
01563                     else if(*pval2 < 0)
01564                         *pval1 -= val;
01565                     pval1++;
01566                     pval2++;
01567                 }
01568             }
01569         }
01570     }
01571 
01572     // Weights
01573     if(!fast_exact_is_equal(weight_decay, 0))
01574     {
01575         if(!input_is_sparse && !output_is_sparse)
01576         {      
01577             if(penalty_type == "L2_square")
01578             {
01579                 multiplyAcc(gweights, weights,-two(learning_rate)*weight_decay);
01580             }
01581             else if(penalty_type == "L1")
01582             {
01583                 val = -learning_rate*weight_decay;
01584                 if(gweights.isCompact() && weights.isCompact())
01585                 {
01586                     Mat::compact_iterator itm = gweights.compact_begin();
01587                     Mat::compact_iterator itmend = gweights.compact_end();
01588                     Mat::compact_iterator itx = weights.compact_begin();
01589                     for(; itm!=itmend; ++itm, ++itx)
01590                     {
01591                         if(*itx > 0)
01592                             *itm += val;
01593                         else if(*itx < 0)
01594                             *itm -= val;
01595                     }
01596                 }
01597                 else // use non-compact iterators
01598                 {
01599                     Mat::iterator itm = gweights.begin();
01600                     Mat::iterator itmend = gweights.end();
01601                     Mat::iterator itx = weights.begin();
01602                     for(; itm!=itmend; ++itm, ++itx)
01603                     {
01604                         if(*itx > 0)
01605                             *itm += val;
01606                         else if(*itx < 0)
01607                             *itm -= val;
01608                     }
01609                 }
01610             }
01611         }
01612         else if(!input_is_sparse && output_is_sparse)
01613         {
01614             ni = output_indices.length();
01615             nj = input.length();
01616             pval1 = output_indices.data();
01617 
01618             if(penalty_type == "L2_square")
01619             {
01620                 val = -two(learning_rate)*weight_decay;
01621                 for(int i=0; i<ni; i++)
01622                 {
01623                     for(int j=0; j<nj; j++)
01624                     {
01625                         gweights(j,(int)(*pval1)) += val * weights(j,(int)(*pval1));
01626                     }
01627                     pval1++;
01628                 }
01629             }
01630             else if(penalty_type == "L1")
01631             {
01632                 val = -learning_rate*weight_decay;
01633                 for(int i=0; i<ni; i++)
01634                 {
01635                     for(int j=0; j<nj; j++)
01636                     {
01637                         val2 = weights(j,(int)(*pval1));
01638                         if(val2 > 0)
01639                             gweights(j,(int)(*pval1)) +=  val;
01640                         else if(val2 < 0)
01641                             gweights(j,(int)(*pval1)) -=  val;
01642                     }
01643                     pval1++;
01644                 }
01645             }
01646         }
01647         else if(input_is_sparse && !output_is_sparse)
01648         {
01649             ni = input.length();
01650             nj = output_indices.length();
01651             if(ni != 0)
01652             {
01653                 pval3 = input.data();
01654                 if(penalty_type == "L2_square")
01655                 {
01656                     val = -two(learning_rate)*weight_decay;
01657                     for(int i=0; i<ni; i++)
01658                     {
01659                         pval1 = weights[(int)(*pval3)];
01660                         pval2 = gweights[(int)(*pval3++)];
01661                         for(int j=0; j<nj;j++)
01662                             *pval2++ += val * *pval1++;
01663                     }
01664                 }
01665                 else if(penalty_type == "L1")
01666                 {
01667                     val = -learning_rate*weight_decay;
01668                     for(int i=0; i<ni; i++)
01669                     {
01670                         pval1 = weights[(int)(*pval3)];
01671                         pval2 = gweights[(int)(*pval3++)];
01672                         for(int j=0; j<nj;j++)
01673                         {
01674                             if(*pval1 > 0)
01675                                 *pval2 += val;
01676                             else if(*pval1 < 0)
01677                                 *pval2 -= val;
01678                             pval2++;
01679                             pval1++;
01680                         }
01681                     }                
01682                 }
01683             }
01684         }
01685         else if(input_is_sparse && output_is_sparse)
01686         {
01687             ni = input.length();
01688             nj = output_indices.length();
01689             if(ni != 0)
01690             {
01691                 pval1 = input.data();
01692                 if(penalty_type == "L2_square")
01693                 {
01694                     val = -two(learning_rate)*weight_decay;
01695                     for(int i=0; i<ni; i++)
01696                     {
01697                         pval2 = output_indices.data();
01698                         for(int j=0; j<nj; j++)
01699                         {
01700                             gweights((int)(*pval1),(int)*pval2) += val*weights((int)(*pval1),(int)*pval2);
01701                             pval2++;
01702                         }
01703                         pval1++;
01704                     }
01705                 }
01706                 else if(penalty_type == "L1")
01707                 {
01708                     val = -learning_rate*weight_decay;
01709                     for(int i=0; i<ni; i++)
01710                     {
01711                         pval2 = output_indices.data();
01712                         for(int j=0; j<nj; j++)
01713                         {
01714                             val2 = weights((int)(*pval1),(int)*pval2);
01715                             if(val2 > 0)
01716                                 gweights((int)(*pval1),(int)*pval2) += val;
01717                             else if(val2 < 0)
01718                                 gweights((int)(*pval1),(int)*pval2) -= val;
01719                             pval2++;
01720                         }
01721                         pval1++;
01722                     }
01723                     
01724                 }
01725             }
01726         }
01727     }
01728 }
01729 
01730 void FeatureSetNNet::compute_softmax(const Vec& x, const Vec& y) const
01731 {
01732     int n = x.length();
01733     
01734 //    real* yp = y.data();
01735 //    real* xp = x.data();
01736 //    for(int i=0; i<n; i++)
01737 //    {
01738 //        *yp++ = *xp > 1e-5 ? *xp : 1e-5;
01739 //        xp++;
01740 //    }
01741 
01742     if (n>0)
01743     {
01744         real* yp = y.data();
01745         real* xp = x.data();
01746         real maxx = max(x);
01747         real s = 0;
01748         for (int i=0;i<n;i++)
01749             s += (*yp++ = safeexp(*xp++-maxx));
01750         if (s == 0) PLERROR("trying to divide by 0 in softmax");
01751         s = 1.0 / s;
01752         yp = y.data();
01753         for (int i=0;i<n;i++)
01754             *yp++ *= s;
01755     }
01756 }
01757 
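// The max is subtracted before exponentiation so that the largest argument
// passed to safeexp() is 0, avoiding overflow without changing the result,
// since softmax(x)_i = exp(x_i - m) / sum_j exp(x_j - m) for any constant m.
// A minimal standalone sketch of the same computation (plain C++ outside this
// class, not PLearn code):
//
//     double m = *std::max_element(x, x + n);   // requires <algorithm>
//     double s = 0;
//     for (int i = 0; i < n; i++) s += (y[i] = std::exp(x[i] - m));
//     for (int i = 0; i < n; i++) y[i] /= s;
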
01758 real FeatureSetNNet::nll(const Vec& outputv, int target) const
01759 {
01760     return -safeflog(outputv[target]);
01761 }
01762     
01763 real FeatureSetNNet::classification_loss(const Vec& outputv, int target) const
01764 {
01765     return (argmax(outputv) == target ? 0 : 1);
01766 }
01767 
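// The two costs above are, respectively, the negative log-likelihood
// -log p(target) of the output distribution and the 0-1 classification
// error, which is 0 exactly when the most likely class is the target.
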
01768 void FeatureSetNNet::initializeParams(bool set_seed)
01769 {
01770     if (set_seed) {
01771         if (seed_>=0)
01772             rgen->manual_seed(seed_);
01773     }
01774 
01775 
01776     PP<Dictionary> dict = train_set->getDictionary(inputsize_);
01777     total_output_size = dict->size();
01778 
01779     total_feats_per_token = 0;
01780     for(int i=0; i<n_feat_sets; i++)
01781         total_feats_per_token += feat_sets[i]->size();
01782 
01783     int nnet_inputsize;
01784     if(dist_rep_dim > 0)
01785     {
01786         wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
01787         bout_dist_rep.resize(dist_rep_dim);
01788         nnet_inputsize = dist_rep_dim*inputsize_/n_feat_sets;
01789         nnet_input.resize(nnet_inputsize);
01790 
01791         fillWeights(wout_dist_rep);
01792         bout_dist_rep.clear();
01793 
01794         gradient_wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
01795         gradient_bout_dist_rep.resize(dist_rep_dim);
01796         gradient_nnet_input.resize(nnet_inputsize);
01797         gradient_wout_dist_rep.clear();
01798         gradient_bout_dist_rep.clear();
01799         gradient_nnet_input.clear();
01800     }
01801     else
01802     {
01803         nnet_inputsize = total_feats_per_token*inputsize_/n_feat_sets;
01804         nnet_input = feat_input;
01805     }
01806 
01807     if(nhidden>0) 
01808     {
01809         w1.resize(nnet_inputsize,nhidden);
01810         b1.resize(nhidden);
01811         hiddenv.resize(nhidden);
01812 
01813         fillWeights(w1);
01814         b1.clear();
01815 
01816         gradient_w1.resize(nnet_inputsize,nhidden);
01817         gradient_b1.resize(nhidden);
01818         gradient_hiddenv.resize(nhidden);
01819         gradient_act_hiddenv.resize(nhidden);
01820         gradient_w1.clear();
01821         gradient_b1.clear();
01822         gradient_hiddenv.clear();
01823         gradient_act_hiddenv.clear();
01824         if(nhidden2>0) 
01825         {
01826             w2.resize(nhidden,nhidden2);
01827             b2.resize(nhidden2);
01828             hidden2v.resize(nhidden2);
01829             wout.resize(nhidden2,total_output_size);
01830             bout.resize(total_output_size);
01831 
01832             fillWeights(w2);
01833             b2.clear();
01834 
01835             gradient_w2.resize(nhidden,nhidden2);
01836             gradient_b2.resize(nhidden2);
01837             gradient_hidden2v.resize(nhidden2);
01838             gradient_act_hidden2v.resize(nhidden2);
01839             gradient_wout.resize(nhidden2,total_output_size);
01840             gradient_bout.resize(total_output_size);
01841             gradient_w2.clear();
01842             gradient_b2.clear();
01843             gradient_hidden2v.clear();
01844             gradient_act_hidden2v.clear();
01845             gradient_wout.clear();
01846             gradient_bout.clear();
01847         }
01848         else
01849         {
01850             wout.resize(nhidden,total_output_size);
01851             bout.resize(total_output_size);
01852 
01853             gradient_wout.resize(nhidden,total_output_size);
01854             gradient_bout.resize(total_output_size);
01855             gradient_wout.clear();
01856             gradient_bout.clear();
01857         }
01858             
01859         if(direct_in_to_out)
01860         {
01861             direct_wout.resize(nnet_inputsize,total_output_size);
01862             direct_bout.resize(0); // Because it is not used
01863 
01864             fillWeights(direct_wout);
01865                 
01866             gradient_direct_wout.resize(nnet_inputsize,total_output_size);
01867             gradient_direct_wout.clear();
01868             gradient_direct_bout.resize(0); // idem
01869         }
01870     }
01871     else
01872     {
01873         wout.resize(nnet_inputsize,total_output_size);
01874         bout.resize(total_output_size);
01875 
01876         gradient_wout.resize(nnet_inputsize,total_output_size);
01877         gradient_bout.resize(total_output_size);
01878         gradient_wout.clear();
01879         gradient_bout.clear();
01880     }
01881 
01882     //fillWeights(wout);
01883     
01884     if (fixed_output_weights) {
01885         static Vec values;
01886         if (values.size()==0)
01887         {
01888             values.resize(2);
01889             values[0]=-1;
01890             values[1]=1;
01891         }
01892         rgen->fill_random_discrete(wout.toVec(), values);
01893     }
01894     else 
01895         fillWeights(wout);
01896 
01897     bout.clear();
01898 
01899     gradient_outputv.resize(total_output_size);
01900     gradient_act_outputv.resize(total_output_size);
01901     gradient_outputv.clear();
01902     gradient_act_outputv.clear();
01903 }
01904 
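// Sizing rationale, as implied by initializeParams() above: an example is
// made of inputsize_/n_feat_sets tokens. When dist_rep_dim > 0, each token is
// first mapped through wout_dist_rep to a learned dist_rep_dim-dimensional
// vector, so the network proper sees dist_rep_dim * inputsize_/n_feat_sets
// inputs; otherwise it consumes the total_feats_per_token *
// inputsize_/n_feat_sets raw feature slots directly.
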
01905 /////////////////////////////////
01906 // makeDeepCopyFromShallowCopy //
01907 /////////////////////////////////
01908 void FeatureSetNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
01909 {
01910     inherited::makeDeepCopyFromShallowCopy(copies);
01911 
01912     // Private variables
01913     deepCopyField(target_values,copies);
01914     deepCopyField(output_comp,copies);
01915     deepCopyField(row,copies);
01916     deepCopyField(last_layer,copies);
01917     deepCopyField(gradient_last_layer,copies);
01918     deepCopyField(feats,copies);
01919     deepCopyField(gradient,copies);
01920 
01921     // Protected variables
01922     deepCopyField(feat_input,copies);
01923     deepCopyField(gradient_feat_input,copies);
01924     deepCopyField(nnet_input,copies);
01925     deepCopyField(gradient_nnet_input,copies);
01926     deepCopyField(hiddenv,copies);
01927     deepCopyField(gradient_hiddenv,copies);
01928     deepCopyField(gradient_act_hiddenv,copies);
01929     deepCopyField(hidden2v,copies);
01930     deepCopyField(gradient_hidden2v,copies);
01931     deepCopyField(gradient_act_hidden2v,copies);
01932     deepCopyField(gradient_outputv,copies);
01933     deepCopyField(gradient_act_outputv,copies);
01934     deepCopyField(feats_since_last_update,copies);
01935     deepCopyField(target_values_since_last_update,copies);
01936     deepCopyField(val_string_reference_set,copies);
01937     deepCopyField(target_values_reference_set,copies);
01938 
01939     // Public variables
01940     deepCopyField(w1,copies);
01941     deepCopyField(gradient_w1,copies);
01942     deepCopyField(b1,copies);
01943     deepCopyField(gradient_b1,copies);
01944     deepCopyField(w2,copies);
01945     deepCopyField(gradient_w2,copies);
01946     deepCopyField(b2,copies);
01947     deepCopyField(gradient_b2,copies);
01948     deepCopyField(wout,copies);
01949     deepCopyField(gradient_wout,copies);
01950     deepCopyField(bout,copies);
01951     deepCopyField(gradient_bout,copies);
01952     deepCopyField(direct_wout,copies);
01953     deepCopyField(gradient_direct_wout,copies);
01954     deepCopyField(direct_bout,copies);
01955     deepCopyField(gradient_direct_bout,copies);
01956     deepCopyField(wout_dist_rep,copies);
01957     deepCopyField(gradient_wout_dist_rep,copies);
01958     deepCopyField(bout_dist_rep,copies);
01959     deepCopyField(gradient_bout_dist_rep,copies);
01960 
01961     // Public build options
01962     deepCopyField(cost_funcs,copies);
01963     deepCopyField(feat_sets,copies);
01964 }
01965 
01966 ////////////////
01967 // outputsize //
01968 ////////////////
01969 int FeatureSetNNet::outputsize() const {
01970     return targetsize_;
01971 }
01972 
01973 ///////////
01974 // train //
01975 ///////////
01976 void FeatureSetNNet::train()
01977 {
01978     //Profiler::activate();
01979     if(!train_set)
01980         PLERROR("In FeatureSetNNet::train, you did not setTrainingSet");
01981 
01982     if(!train_stats)
01983         PLERROR("In FeatureSetNNet::train, you did not setTrainStatsCollector");
01984  
01985     Vec outputv(total_output_size);
01986     Vec costsv(getTrainCostNames().length());
01987     Vec inputv(train_set->inputsize());
01988     Vec targetv(train_set->targetsize());
01989     real sample_weight=1;
01990 
01991 
01992     int l = train_set->length();  
01993     int bs = batch_size>0 ? batch_size : l;
01994 
01995     PP<ProgressBar> pb;
01996     if(report_progress)
01997         pb = new ProgressBar("Training " + classname() + " from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
01998 
01999     //if(stage == 0)
02000     //{
02001     //    for(int t=0; t<l;t++)
02002     //    {
02003     //        cout << "t=" << t << " ";
02004     //        train_set->getExample(t,inputv,targetv,sample_weight);
02005     //        row.subVec(0,inputsize_) << inputv;
02006     //        train_set->getValues(row,inputsize_,target_values);
02007     //        if(target_values.length() != 1)
02008     //            verify_gradient(inputv,targetv,1e-6);
02009     //    }
02010     //    return;
02011     //}
02012 
02013     Mat old_gradient_wout;
02014     Vec old_gradient_bout;
02015     Mat old_gradient_wout_dist_rep;
02016     Vec old_gradient_bout_dist_rep;
02017     Mat old_gradient_w1;
02018     Vec old_gradient_b1;
02019     Mat old_gradient_w2;
02020     Vec old_gradient_b2;
02021     Mat old_gradient_direct_wout;
02022 
02023     if(stochastic_gradient_descent_speedup)
02024     {
02025         // Trick to make stochastic gradient descent faster
02026 
02027         old_gradient_wout = gradient_wout;
02028         old_gradient_bout = gradient_bout;
02029         gradient_wout = wout;
02030         gradient_bout = bout;
02031         
02032         if(dist_rep_dim > 0)
02033         {
02034             old_gradient_wout_dist_rep = gradient_wout_dist_rep;
02035             old_gradient_bout_dist_rep = gradient_bout_dist_rep;
02036             gradient_wout_dist_rep = wout_dist_rep;
02037             gradient_bout_dist_rep = bout_dist_rep;
02038         }
02039 
02040         if(nhidden>0) 
02041         {
02042             old_gradient_w1 = gradient_w1;
02043             old_gradient_b1 = gradient_b1;
02044             gradient_w1 = w1;
02045             gradient_b1 = b1;
02046             if(nhidden2>0) 
02047             {
02048                 old_gradient_w2 = gradient_w2;
02049                 old_gradient_b2 = gradient_b2;
02050                 gradient_w2 = w2;
02051                 gradient_b2 = b2;
02052             }
02053             
02054             if(direct_in_to_out)
02055             {
02056                 old_gradient_direct_wout = gradient_direct_wout;
02057                 gradient_direct_wout = direct_wout;
02058             }
02059         }
02060     }
02061 
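    // The swap above relies on Mat/Vec assignment sharing the underlying
    // storage (a presumably shallow copy): gradient_* now aliases the
    // parameters themselves, so bprop() writes its updates directly into the
    // weights. This is why update() is skipped below when
    // stochastic_gradient_descent_speedup is true, and why the original
    // gradient buffers are restored once training ends.
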
02062     int initial_stage = stage;
02063     while(stage<nstages)
02064     {
02065         for(int t=0; t<l;)
02066         {
02067             //if(t%1000 == 0)
02068             //{
02069             //    cout << "Time: " << clock()/CLOCKS_PER_SEC << " seconds." << endl;
02070             //}
02071             for(int i=0; i<bs; i++)
02072             {
02079                 train_set->getExample(t%l,inputv,targetv,sample_weight);
02080                 //Profiler::start("fprop()");
02081                 fprop(inputv,outputv,targetv,costsv,sample_weight);
02082                 //Profiler::end("fprop()");
02083                 //Profiler::start("bprop()");
02084                 bprop(inputv,outputv,targetv,costsv,
02085                       start_learning_rate/(bs*(1.0+decrease_constant*total_updates)),
02086                       sample_weight);
02087                 //Profiler::end("bprop()");
02088                 train_stats->update(costsv);
02089                 t++;
02090             }
02091             // Update
02092             if(!stochastic_gradient_descent_speedup)
02093                 update();
02094             total_updates++;
02095         }
02096         train_stats->finalize();
02097         ++stage;
02098         if(verbosity>2)
02099             cout << "Epoch " << stage << " train objective: " 
02100                  << train_stats->getMean() << endl;
02101         if(pb) pb->update(stage-initial_stage);
02102     }
02103 
02104     if(stochastic_gradient_descent_speedup)
02105     {
02106         // Trick to make stochastic gradient descent faster
02107 
02108         gradient_wout = old_gradient_wout;
02109         gradient_bout = old_gradient_bout;
02110         
02111         if(dist_rep_dim > 0)
02112         {
02113             gradient_wout_dist_rep = old_gradient_wout_dist_rep;
02114             gradient_bout_dist_rep = old_gradient_bout_dist_rep;
02115         }
02116 
02117         if(nhidden>0) 
02118         {
02119             gradient_w1 = old_gradient_w1;
02120             gradient_b1 = old_gradient_b1;
02121             if(nhidden2>0) 
02122             {
02123                 gradient_w2 = old_gradient_w2;
02124                 gradient_b2 = old_gradient_b2;
02125             }
02126             
02127             if(direct_in_to_out)
02128             {
02129                 gradient_direct_wout = old_gradient_direct_wout;
02130             }
02131         }
02132     }
02133     //Profiler::report(cout);
02134 }
02135 
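// The per-sample learning rate used in train() decays as
//     start_learning_rate / (bs * (1 + decrease_constant * total_updates)),
// so the step size shrinks as updates accumulate; the division by bs
// presumably turns the bs accumulated per-sample gradients of a minibatch
// into an average.
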
02136 void FeatureSetNNet::verify_gradient(Vec& input, Vec targetv, real step)
02137 {
02138     Vec costsv(getTrainCostNames().length());
02139     real sampleweight = 1;
02140     real verify_step = step;
02141     
02142     // Local counters, to avoid interacting with the ones used by fprop()
02143     int nfeats = 0;
02144     int id = 0;
02145     int ifeats = 0;
02146 
02147     Vec est_gradient_bout;
02148     Mat est_gradient_wout;
02149     Vec est_gradient_bout_dist_rep;
02150     Mat est_gradient_wout_dist_rep;
02151     Vec est_gradient_b1;
02152     Mat est_gradient_w1;
02153     Vec est_gradient_b2;
02154     Mat est_gradient_w2;
02155     Vec est_gradient_direct_bout;
02156     Mat est_gradient_direct_wout;
02157 
02158     int nnet_inputsize;
02159     if(dist_rep_dim > 0)
02160     {
02161         nnet_inputsize = dist_rep_dim*inputsize_/n_feat_sets;
02162         est_gradient_wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
02163         est_gradient_bout_dist_rep.resize(dist_rep_dim);
02164         est_gradient_wout_dist_rep.clear();
02165         est_gradient_bout_dist_rep.clear();
02166         gradient_wout_dist_rep.clear();
02167         gradient_bout_dist_rep.clear();
02168     }
02169     else
02170     {
02171         nnet_inputsize = total_feats_per_token*inputsize_/n_feat_sets;
02172     }
02173 
02174     if(nhidden>0) 
02175     {
02176         est_gradient_w1.resize(nnet_inputsize,nhidden);
02177         est_gradient_b1.resize(nhidden);
02178         est_gradient_w1.clear();
02179         est_gradient_b1.clear();
02180         gradient_w1.clear();
02181         gradient_b1.clear();
02182         if(nhidden2>0) 
02183         {
02184             est_gradient_w2.resize(nhidden,nhidden2);
02185             est_gradient_b2.resize(nhidden2);
02186             est_gradient_wout.resize(nhidden2,total_output_size);
02187             est_gradient_bout.resize(total_output_size);
02188             est_gradient_w2.clear();
02189             est_gradient_b2.clear();
02190             est_gradient_wout.clear();
02191             est_gradient_bout.clear();
02192             gradient_w2.clear();
02193             gradient_b2.clear();
02194             gradient_wout.clear();
02195             gradient_bout.clear();
02196         }
02197         else
02198         {
02199             est_gradient_wout.resize(nhidden,total_output_size);
02200             est_gradient_bout.resize(total_output_size);
02201             est_gradient_wout.clear();
02202             est_gradient_bout.clear();
02203             gradient_wout.clear();
02204             gradient_bout.clear();
02205         }
02206             
02207         if(direct_in_to_out)
02208         {
02209             est_gradient_direct_wout.resize(nnet_inputsize,total_output_size);
02210             est_gradient_direct_wout.clear();
02211             est_gradient_direct_bout.resize(0); // idem
02212             gradient_direct_wout.clear();                        
02213         }
02214     }
02215     else
02216     {
02217         est_gradient_wout.resize(nnet_inputsize,total_output_size);
02218         est_gradient_bout.resize(total_output_size);
02219         est_gradient_wout.clear();
02220         est_gradient_bout.clear();
02221         gradient_wout.clear();
02222         gradient_bout.clear();
02223     }
02224 
02225     fprop(input, output_comp, targetv, costsv);
02226     bprop(input,output_comp,targetv,costsv,
02227           -1, sampleweight);
02228     clearProppathGradient();
02229     
02230     // Compute estimated gradient
02231 
02232     if(dist_rep_dim > 0) 
02233     {        
02234         nfeats = 0;
02235         id = 0;
02236         for(int i=0; i<inputsize_;)
02237         {
02238             ifeats = 0;
02239             for(int j=0; j<n_feat_sets; j++,i++)
02240                 ifeats += feats[i].length();
02241             verify_gradient_affine_transform(
02242                 input,output_comp, targetv, costsv, sampleweight,
02243                 feat_input.subVec(nfeats,ifeats),
02244                 wout_dist_rep, bout_dist_rep,
02245                 est_gradient_wout_dist_rep, est_gradient_bout_dist_rep,
02246                 true, false, verify_step);
02247             nfeats += ifeats;
02248             id++;
02249         }
02250 
02251         cout << "Verify wout_dist_rep" << endl;
02252         output_gradient_verification(gradient_wout_dist_rep.toVec(), est_gradient_wout_dist_rep.toVec());
02253         cout << "Verify bout_dist_rep" << endl;
02254         output_gradient_verification(gradient_bout_dist_rep, est_gradient_bout_dist_rep);
02255         gradient_wout_dist_rep.clear();
02256         gradient_bout_dist_rep.clear();
02257 
02258         if(nhidden>0) 
02259         {
02260             verify_gradient_affine_transform(
02261                 input,output_comp, targetv, costsv, sampleweight,
02262                 nnet_input,w1,b1,
02263                 est_gradient_w1, est_gradient_b1, false,false, verify_step);
02264 
02265             cout << "Verify w1" << endl;
02266             output_gradient_verification(gradient_w1.toVec(), est_gradient_w1.toVec());
02267             cout << "Verify b1" << endl;
02268             output_gradient_verification(gradient_b1, est_gradient_b1);
02269             
02270             if(nhidden2>0) 
02271             {
02272                 verify_gradient_affine_transform(
02273                     input,output_comp, targetv, costsv, sampleweight,    
02274                     hiddenv,w2,b2,
02275                     est_gradient_w2, est_gradient_b2,
02276                     false,false, verify_step);
02277                 cout << "Verify w2" << endl;
02278                 output_gradient_verification(gradient_w2.toVec(), est_gradient_w2.toVec());
02279                 cout << "Verify b2" << endl;
02280                 output_gradient_verification(gradient_b2, est_gradient_b2);
02281 
02282                 last_layer = hidden2v;
02283             }
02284             else
02285                 last_layer = hiddenv;
02286         }
02287         else
02288             last_layer = nnet_input;
02289 
02290         verify_gradient_affine_transform(
02291             input,output_comp, targetv, costsv, sampleweight,
02292             last_layer,wout,bout,
02293             est_gradient_wout, est_gradient_bout, false,
02294             possible_targets_vary,verify_step,target_values);
02295 
02296         cout << "Verify wout" << endl;
02297         output_gradient_verification(gradient_wout.toVec(), est_gradient_wout.toVec());
02298         cout << "Verify bout" << endl;
02299         output_gradient_verification(gradient_bout, est_gradient_bout);
02300  
02301         if(direct_in_to_out && nhidden>0)
02302         {
02303             verify_gradient_affine_transform(
02304                 input,output_comp, targetv, costsv, sampleweight,
02305                 nnet_input,direct_wout,direct_bout,
02306                 est_gradient_direct_wout, est_gradient_direct_bout,false,
02307                 possible_targets_vary, verify_step, target_values);
02308             cout << "Verify direct_wout" << endl;
02309             output_gradient_verification(gradient_direct_wout.toVec(), est_gradient_direct_wout.toVec());
02310             //cout << "Verify direct_bout" << endl;
02311             //output_gradient_verification(gradient_direct_bout, est_gradient_direct_bout);
02312         }
02313     }
02314     else
02315     {        
02316         if(nhidden>0)
02317         {
02318             verify_gradient_affine_transform(
02319                 input,output_comp, targetv, costsv, sampleweight,
02320                 feat_input,w1,b1,
02321                 est_gradient_w1, est_gradient_b1,
02322                 true,false, verify_step);
02323 
02324             cout << "Verify w1" << endl;
02325             output_gradient_verification(gradient_w1.toVec(), est_gradient_w1.toVec());
02326             cout << "Verify b1" << endl;
02327             output_gradient_verification(gradient_b1, est_gradient_b1);
02328 
02329             if(nhidden2>0)
02330             {
02331                 verify_gradient_affine_transform(
02332                     input,output_comp, targetv, costsv, sampleweight,
02333                     hiddenv,w2,b2,
02334                     est_gradient_w2, est_gradient_b2,false,false, // hiddenv is dense
02335                     verify_step);
02336 
02337                 cout << "Verify w2" << endl;
02338                 output_gradient_verification(gradient_w2.toVec(), est_gradient_w2.toVec());
02339                 cout << "Verify b2" << endl;
02340                 output_gradient_verification(gradient_b2, est_gradient_b2);
02341                 
02342                 last_layer = hidden2v;
02343             }
02344             else
02345                 last_layer = hiddenv;
02346         }
02347         else
02348             last_layer = feat_input;
02349         
02350         verify_gradient_affine_transform(
02351             input,output_comp, targetv, costsv, sampleweight,
02352             last_layer,wout,bout,
02353             est_gradient_wout, est_gradient_bout, nhidden<=0,
02354             possible_targets_vary,verify_step, target_values);
02355 
02356         cout << "Verify wout" << endl;
02357         output_gradient_verification(gradient_wout.toVec(), est_gradient_wout.toVec());
02358         cout << "Verify bout" << endl;
02359         output_gradient_verification(gradient_bout, est_gradient_bout);
02360         
02361         if(direct_in_to_out && nhidden>0)
02362         {
02363             verify_gradient_affine_transform(
02364                 input,output_comp, targetv, costsv, sampleweight,
02365                 feat_input,direct_wout,direct_bout,
02366                 est_gradient_direct_wout, est_gradient_direct_bout,true,
02367                 possible_targets_vary, verify_step,target_values);
02368             cout << "Verify direct_wout" << endl;
02369             output_gradient_verification(gradient_direct_wout.toVec(), est_gradient_direct_wout.toVec());
02370             cout << "Verify direct_bout" << endl;
02371             output_gradient_verification(gradient_direct_bout, est_gradient_direct_bout);
02372         }
02373     }
02374 
02375 }
02376 
02377 void FeatureSetNNet::verify_gradient_affine_transform(
02378     Vec global_input, Vec& global_output, Vec& global_targetv,
02379     Vec& global_costs, real sampleweight,
02380     Vec input, Mat weights, Vec bias,
02381     Mat est_gweights, Vec est_gbias,  
02382     bool input_is_sparse, bool output_is_sparse,
02383     real step,
02384     Vec output_indices) const
02385 {
02386     real *pval1, *pval2, *pval3;
02387     int ni,nj;
02388     real out1,out2;
02389     // Bias
02390     if(bias.length() != 0)
02391     {
02392         if(output_is_sparse)
02393         {
02394             pval1 = est_gbias.data();
02395             pval2 = bias.data();
02396             pval3 = output_indices.data();
02397             ni = output_indices.length();
02398             for(int i=0; i<ni; i++)
02399             {
02400                 pval2[(int)*pval3] += step;
02401                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02402                 out1 = global_costs[0];
02403                 pval2[(int)*pval3] -= 2*step;
02404                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02405                 out2 = global_costs[0];
02406                 pval1[(int)*pval3] = (out1-out2)/(2*step);
02407                 pval2[(int)*pval3] += step;
02408                 pval3++;
02409             }
02410         }
02411         else
02412         {
02413             pval1 = est_gbias.data();
02414             pval2 = bias.data();
02415             ni = bias.length();
02416             for(int i=0; i<ni; i++)
02417             {
02418                 *pval2 += step;
02419                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02420                 out1 = global_costs[0];
02421                 *pval2 -= 2*step;
02422                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02423                 out2 = global_costs[0];
02424                 *pval1 = (out1-out2)/(2*step);
02425                 *pval2 += step;
02426                 pval1++; 
02427                 pval2++;
02428             }
02429         }
02430     }
02431 
02432     // Weights
02433     if(!input_is_sparse && !output_is_sparse)
02434     {
02435         ni = weights.length();
02436         nj = weights.width();
02437         for(int i=0; i<ni; i++)
02438             for(int j=0; j<nj; j++)
02439             {
02440                 weights(i,j) += step;
02441                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02442                 out1 = global_costs[0];
02443                 weights(i,j) -= 2*step;
02444                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02445                 out2 = global_costs[0];
02446                 weights(i,j) += step;
02447                 est_gweights(i,j) = (out1-out2)/(2*step);
02448             }
02449     }
02450     else if(!input_is_sparse && output_is_sparse)
02451     {
02452         ni = output_indices.length();
02453         nj = input.length();
02454         pval3 = output_indices.data();
02455         for(int i=0; i<ni; i++)
02456         {
02457             for(int j=0; j<nj; j++)
02458             {
02459                 weights(j,(int)*pval3) += step;
02460                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02461                 out1 = global_costs[0];
02462                 weights(j,(int)*pval3) -= 2*step;
02463                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02464                 out2 = global_costs[0];
02465                 weights(j,(int)*pval3) += step;
02466                 est_gweights(j,(int)*pval3) = (out1-out2)/(2*step);
02467 //                if(target_values.length() != 1 && input[j] != 0 && (out1-out2)/(2*step) == 0)
02468 //                {                    
02469 //                    print_what_the_fuck();
02470 //                    weights(j,(int)*pval3) += 1;
02471 //                    fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02472 //                    weights(j,(int)*pval3) -= 1;
02473 //                    cout << "out1 - global_costs[0] =" << out1-global_costs[0] << endl;
02474 //                }
02475             }
02476             pval3++;
02477         }
02478     }
02479     else if(input_is_sparse && !output_is_sparse)
02480     {
02481         ni = input.length();
02482         nj = weights.width();
02483         if(ni != 0 )
02484         {
02485             pval3 = input.data();
02486             for(int i=0; i<ni; i++)
02487             {
02488                 pval1 = est_gweights[(int)(*pval3)];
02489                 pval2 = weights[(int)(*pval3++)];
02490                 for(int j=0; j<nj;j++)
02491                 {
02492                     *pval2 += step;
02493                     fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02494                     out1 = global_costs[0];
02495                     *pval2 -= 2*step;
02496                     fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02497                     out2 = global_costs[0];
02498                     *pval1 = (out1-out2)/(2*step);
02499                     *pval2 += step;
02500                     pval1++;
02501                     pval2++;
02502                 }
02503             }
02504         }
02505     }
02506     else if(input_is_sparse && output_is_sparse)
02507     {
02508         // Weights
02509         ni = input.length();
02510         nj = output_indices.length();
02511         if(ni != 0)
02512         {
02513             pval2 = input.data();
02514             for(int i=0; i<ni; i++)
02515             {
02516                 pval3 = output_indices.data();
02517                 for(int j=0; j<nj; j++)
02518                 {
02519                     weights((int)(*pval2),(int)*pval3) += step;
02520                     fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02521                     out1 = global_costs[0];
02522                     weights((int)(*pval2),(int)*pval3) -= 2*step;
02523                     fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02524                     out2 = global_costs[0];
02525                     est_gweights((int)(*pval2),(int)*pval3)  = (out1-out2)/(2*step);
02526                     weights((int)(*pval2),(int)*pval3) += step;
02527                     pval3++;
02528                 }
02529                 pval2++;
02530             }
02531         }
02532     }
02533 }
02534 
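// Each estimated entry above is a central finite difference of the first
// cost with respect to a single parameter,
//     dC/dw ~ ( C(w + step) - C(w - step) ) / (2*step),
// whose truncation error is O(step^2); every probed parameter is restored to
// its original value afterwards.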
02535 
02536 void FeatureSetNNet::output_gradient_verification(Vec grad, Vec est_grad)
02537 {
02538     // Inspired from Func::verifyGradient()
02539 
02540     Vec num = apply(grad - est_grad,(tRealFunc)FABS);
02541     Vec denom = real(0.5)*apply(grad + est_grad,(tRealFunc)FABS);
02542     for (int i = 0; i < num.length(); i++)
02543     {
02544         if (!fast_exact_is_equal(num[i], 0))
02545             num[i] /= denom[i];
02546         else
02547             if(!fast_exact_is_equal(denom[i],0))
02548                 cout << "at position " << i << " num[i] == 0 but denom[i] = " << denom[i] << endl;
02549     }
02550     int pos = argmax(num);
02551     cout << max(num) << " (at position " << pos << "/" << num.length()
02552          << ", computed = " << grad[pos] << " and estimated = "
02553          << est_grad[pos] << ")" << endl;
02554 
02555     real norm_grad = norm(grad);
02556     real norm_est_grad = norm(est_grad);
02557     real cos_angle = fast_exact_is_equal(norm_grad*norm_est_grad,
02558                                          0)
02559         ? MISSING_VALUE
02560         : dot(grad,est_grad) /
02561         (norm_grad*norm_est_grad);
02562     if (cos_angle > 1)
02563         cos_angle = 1;      // Numerical imprecisions can lead to such situation.
02564     cout << "grad.length() = " << grad.length() << endl;
02565     cout << "cos(angle) : " << cos_angle << endl;
02566     cout << "angle : " << ( is_missing(cos_angle) ? MISSING_VALUE
02567                             : acos(cos_angle) ) << endl;
02568 }
02569 
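// Two agreement measures are printed above: the worst element-wise relative
// difference |g_i - e_i| / (0.5 * |g_i + e_i|) between the analytic gradient
// g and the finite-difference estimate e, and the angle
// acos( dot(g,e) / (||g|| ||e||) ) between the two vectors; both should be
// close to 0 when the analytic gradient is correct.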
02570 void FeatureSetNNet::batchComputeOutputAndConfidence(VMat inputs, real probability,
02571                                          VMat outputs_and_confidence) const
02572 {
02573     val_string_reference_set = inputs;
02574     inherited::batchComputeOutputAndConfidence(inputs,probability,outputs_and_confidence);
02575     val_string_reference_set = train_set;
02576 }
02577 
02578 void FeatureSetNNet::use(VMat testset, VMat outputs) const
02579 {
02580     val_string_reference_set = testset;
02581     if(testset->width() > train_set->inputsize())
02582         target_values_reference_set = testset;
02584     inherited::use(testset,outputs);
02585     val_string_reference_set = train_set;
02586     if(testset->width() > train_set->inputsize())
02587         target_values_reference_set = train_set;
02588 }
02589 
02590 void FeatureSetNNet::test(VMat testset, PP<VecStatsCollector> test_stats, 
02591                       VMat testoutputs, VMat testcosts) const
02592 {
02593     val_string_reference_set = testset;
02594     target_values_reference_set = testset;
02595     inherited::test(testset,test_stats,testoutputs,testcosts);
02596     val_string_reference_set = train_set;
02597     target_values_reference_set = train_set;
02598 }
02599 
02600 VMat FeatureSetNNet::processDataSet(VMat dataset) const
02601 {
02602     VMat ret;
02603     val_string_reference_set = dataset;
02604     // Assumes it contains the target part information
02605     if(dataset->width() > train_set->inputsize())
02606         target_values_reference_set = dataset;
02607     ret = inherited::processDataSet(dataset);
02608     val_string_reference_set = train_set;
02609     if(dataset->width() > train_set->inputsize())
02610         target_values_reference_set = train_set;
02611     return ret;
02612 }
02613 
02614 } // end of namespace PLearn
02615 
02616 
02617 /*
02618   Local Variables:
02619   mode:c++
02620   c-basic-offset:4
02621   c-file-style:"stroustrup"
02622   c-file-offsets:((innamespace . 0)(inline-open . 0))
02623   indent-tabs-mode:nil
02624   fill-column:79
02625   End:
02626 */
02627 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :