// -*- C++ -*-

// FeatureSetSequentialCRF.cc
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org



#include "FeatureSetSequentialCRF.h"
#include <plearn/vmat/SubVMatrix.h>
//#include <plearn/sys/Profiler.h>
#include <time.h>
#include <stdio.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(FeatureSetSequentialCRF, "Feedforward neural network for symbolic data represented using features", 
                        "Inspired by the NNet class, FeatureSetSequentialCRF is an extension that deals with\n"
                        "feature representations of symbolic data. It can also learn distributed representations\n"
                        "for each symbolic input token. The possible targets are defined by the VMatrix's\n"
                        "getValues() function.\n");
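
// A minimal usage sketch (added; hypothetical, not from the original file --
// member and method names are the options and PLearner API assumed below):
//
//     PP<FeatureSetSequentialCRF> learner = new FeatureSetSequentialCRF();
//     learner->nhidden = 100;                     // one hidden layer
//     learner->dist_rep_dim = 50;                 // 50-d token representations
//     learner->output_transfer_func = "softmax";
//     learner->cost_funcs = TVec<string>(1, "NLL");
//     learner->feat_sets = my_feat_sets;          // one or more FeatureSets
//     learner->setTrainingSet(train_vmat);        // VMat supplying getValues()
//     learner->train();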

FeatureSetSequentialCRF::FeatureSetSequentialCRF() // DEFAULT VALUES FOR ALL OPTIONS
    :
rgen(new PRandom()),
nhidden(0),
nhidden2(0),
weight_decay(0),
bias_decay(0),
layer1_weight_decay(0),
layer1_bias_decay(0),
layer2_weight_decay(0),
layer2_bias_decay(0),
output_layer_weight_decay(0),
output_layer_bias_decay(0),
direct_in_to_out_weight_decay(0),
output_layer_dist_rep_weight_decay(0),
output_layer_dist_rep_bias_decay(0),
fixed_output_weights(0),
direct_in_to_out(0),
penalty_type("L2_square"),
output_transfer_func(""),
hidden_transfer_func("tanh"),
start_learning_rate(0.01),
decrease_constant(0),
batch_size(1),
stochastic_gradient_descent_speedup(true),
initialization_method("uniform_linear"),
dist_rep_dim(-1),
possible_targets_vary(false)
{}

FeatureSetSequentialCRF::~FeatureSetSequentialCRF()
{
}

void FeatureSetSequentialCRF::declareOptions(OptionList& ol)
{
    declareOption(ol, "nhidden", &FeatureSetSequentialCRF::nhidden, 
                  OptionBase::buildoption, 
                  "Number of hidden units in first hidden layer (0 means no hidden layer).\n");

    declareOption(ol, "nhidden2", &FeatureSetSequentialCRF::nhidden2, 
                  OptionBase::buildoption, 
                  "Number of hidden units in second hidden layer (0 means no hidden layer).\n");

    declareOption(ol, "weight_decay", &FeatureSetSequentialCRF::weight_decay, 
                  OptionBase::buildoption, 
                  "Global weight decay for all layers.\n");

    declareOption(ol, "bias_decay", &FeatureSetSequentialCRF::bias_decay, 
                  OptionBase::buildoption, 
                  "Global bias decay for all layers.\n");

    declareOption(ol, "layer1_weight_decay", &FeatureSetSequentialCRF::layer1_weight_decay, 
                  OptionBase::buildoption, 
                  "Additional weight decay for the first hidden layer.  Is added to weight_decay.\n");

    declareOption(ol, "layer1_bias_decay", &FeatureSetSequentialCRF::layer1_bias_decay, 
                  OptionBase::buildoption, 
                  "Additional bias decay for the first hidden layer.  Is added to bias_decay.\n");

    declareOption(ol, "layer2_weight_decay", &FeatureSetSequentialCRF::layer2_weight_decay, 
                  OptionBase::buildoption, 
                  "Additional weight decay for the second hidden layer.  Is added to weight_decay.\n");

    declareOption(ol, "layer2_bias_decay", &FeatureSetSequentialCRF::layer2_bias_decay, 
                  OptionBase::buildoption, 
                  "Additional bias decay for the second hidden layer.  Is added to bias_decay.\n");

    declareOption(ol, "output_layer_weight_decay", 
                  &FeatureSetSequentialCRF::output_layer_weight_decay, 
                  OptionBase::buildoption, 
                  "Additional weight decay for the output layer.  Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_bias_decay", 
                  &FeatureSetSequentialCRF::output_layer_bias_decay, 
                  OptionBase::buildoption, 
                  "Additional bias decay for the output layer.  Is added to 'bias_decay'.\n");

    declareOption(ol, "direct_in_to_out_weight_decay", 
                  &FeatureSetSequentialCRF::direct_in_to_out_weight_decay, 
                  OptionBase::buildoption,
                  "Additional weight decay for the weights going from the input directly to the output layer.  Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_dist_rep_weight_decay", 
                  &FeatureSetSequentialCRF::output_layer_dist_rep_weight_decay, 
                  OptionBase::buildoption, 
                  "Additional weight decay for the output layer of distributed representation\n"
                  "predictor.  Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_dist_rep_bias_decay", 
                  &FeatureSetSequentialCRF::output_layer_dist_rep_bias_decay, 
                  OptionBase::buildoption, 
                  "Additional bias decay for the output layer of distributed representation\n"
                  "predictor.  Is added to 'bias_decay'.\n");

    declareOption(ol, "fixed_output_weights", 
                  &FeatureSetSequentialCRF::fixed_output_weights, 
                  OptionBase::buildoption, 
                  "If true then the output weights are not learned. They are initialized to +1 or -1 randomly.\n");

    declareOption(ol, "direct_in_to_out", &FeatureSetSequentialCRF::direct_in_to_out, 
                  OptionBase::buildoption, 
                  "If true then direct input to output weights will be added (if nhidden > 0).\n");

    declareOption(ol, "penalty_type", &FeatureSetSequentialCRF::penalty_type,
                  OptionBase::buildoption,
                  "Penalty to use on the weights (for weight and bias decay).\n"
                  "Can be any of:\n"
                  "  - \"L1\": L1 norm,\n"
                  "  - \"L2_square\" (default): square of the L2 norm.\n");
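
    // A note (added) on how these decays are applied in
    // gradient_affine_transform() below: with learning rate lr and total
    // decay coefficient d for a parameter w, each gradient step also adds
    //   "L2_square" :  -2*lr*d*w        (gradient of d*w^2)
    //   "L1"        :  -lr*d*sign(w)    (subgradient of d*|w|)
    // to the accumulated update for w.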

    declareOption(ol, "output_transfer_func", 
                  &FeatureSetSequentialCRF::output_transfer_func, 
                  OptionBase::buildoption, 
                  "Which transfer function to use for the output layer? One of: \n"
                  "  - \"tanh\" \n"
                  "  - \"sigmoid\" \n"
                  "  - \"softmax\" \n"
                  "An empty string or \"none\" means no output transfer function. \n");

    declareOption(ol, "hidden_transfer_func", 
                  &FeatureSetSequentialCRF::hidden_transfer_func, 
                  OptionBase::buildoption, 
                  "What transfer function to use for hidden units? One of \n"
                  "  - \"linear\" \n"
                  "  - \"tanh\" \n"
                  "  - \"sigmoid\" \n"
                  "  - \"softmax\" \n");

    declareOption(ol, "cost_funcs", &FeatureSetSequentialCRF::cost_funcs, 
                  OptionBase::buildoption, 
                  "A list of cost functions to use\n"
                  "in the form \"[ cf1; cf2; cf3; ... ]\" where each function is one of: \n"
                  "  - \"NLL\" (negative log likelihood -log(p[c]) for classification) \n"
                  "  - \"class_error\" (classification error) \n"
                  "The FIRST function of the list will be used as \n"
                  "the objective function to optimize \n"
                  "(possibly with an added weight decay penalty) \n");

    declareOption(ol, "start_learning_rate", &FeatureSetSequentialCRF::start_learning_rate, 
                  OptionBase::buildoption, 
                  "Start learning rate of gradient descent.\n");

    declareOption(ol, "decrease_constant", &FeatureSetSequentialCRF::decrease_constant, 
                  OptionBase::buildoption, 
                  "Decrease constant of gradient descent.\n");

    declareOption(ol, "batch_size", &FeatureSetSequentialCRF::batch_size, 
                  OptionBase::buildoption, 
                  "How many samples to use to estimate the average gradient before updating the weights.\n"
                  "0 is equivalent to specifying training_set->length(). \n");

    declareOption(ol, "stochastic_gradient_descent_speedup", &FeatureSetSequentialCRF::stochastic_gradient_descent_speedup, 
                  OptionBase::buildoption, 
                  "Indication that a trick to speed up stochastic gradient descent\n"
                  "should be used.\n");

    declareOption(ol, "initialization_method", 
                  &FeatureSetSequentialCRF::initialization_method, OptionBase::buildoption, 
                  "The method used to initialize the weights:\n"
                  " - \"normal_linear\"  = a normal law with variance 1/n_inputs\n"
                  " - \"normal_sqrt\"    = a normal law with variance 1/sqrt(n_inputs)\n"
                  " - \"uniform_linear\" = a uniform law in [-1/n_inputs, 1/n_inputs]\n"
                  " - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(n_inputs), 1/sqrt(n_inputs)]\n"
                  " - \"zero\"           = all weights are set to 0\n");

    declareOption(ol, "dist_rep_dim", &FeatureSetSequentialCRF::dist_rep_dim, 
                  OptionBase::buildoption, 
                  "Dimensionality (number of components) of distributed representations.\n"
                  "If <= 0, distributed representations will not be used.\n"
        );

    declareOption(ol, "possible_targets_vary", 
                  &FeatureSetSequentialCRF::possible_targets_vary, OptionBase::buildoption, 
                  "Indication that the set of possible targets varies from\n"
                  "one input vector to another.\n"
        );

    declareOption(ol, "feat_sets", &FeatureSetSequentialCRF::feat_sets, 
                  OptionBase::buildoption, 
                  "FeatureSets to apply on input. The number of feature\n"
                  "sets should be a divisor of inputsize(). The feature\n"
                  "set applied to the i-th input field is the feature\n"
                  "set at position i % feat_sets.length().\n"
        );
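
    // For example: with inputsize() == 6 and feat_sets.length() == 2, input
    // fields 0, 2 and 4 are mapped through feat_sets[0] and fields 1, 3 and
    // 5 through feat_sets[1], per the i % feat_sets.length() rule above.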

    declareOption(ol, "train_set", &FeatureSetSequentialCRF::train_set, 
                  OptionBase::learntoption, 
                  "VMatrix used for training, that also provides information about the data (e.g. Dictionary objects for the different fields).\n");


    // Networks' learnt parameters
    declareOption(ol, "w1", &FeatureSetSequentialCRF::w1, OptionBase::learntoption, 
                  "Weights of first hidden layer.\n");
    declareOption(ol, "b1", &FeatureSetSequentialCRF::b1, OptionBase::learntoption, 
                  "Bias of first hidden layer.\n");
    declareOption(ol, "w2", &FeatureSetSequentialCRF::w2, OptionBase::learntoption, 
                  "Weights of second hidden layer.\n");
    declareOption(ol, "b2", &FeatureSetSequentialCRF::b2, OptionBase::learntoption, 
                  "Bias of second hidden layer.\n");
    declareOption(ol, "wout", &FeatureSetSequentialCRF::wout, OptionBase::learntoption, 
                  "Weights of output layer.\n");
    declareOption(ol, "bout", &FeatureSetSequentialCRF::bout, OptionBase::learntoption, 
                  "Bias of output layer.\n");
    declareOption(ol, "direct_wout", &FeatureSetSequentialCRF::direct_wout, 
                  OptionBase::learntoption, 
                  "Direct input to output weights.\n");
    declareOption(ol, "direct_bout", &FeatureSetSequentialCRF::direct_bout, 
                  OptionBase::learntoption, 
                  "Direct input to output bias.\n");
    declareOption(ol, "wout_dist_rep", &FeatureSetSequentialCRF::wout_dist_rep, 
                  OptionBase::learntoption, 
                  "Weights of output layer for distributed representation predictor.\n");
    declareOption(ol, "bout_dist_rep", &FeatureSetSequentialCRF::bout_dist_rep, 
                  OptionBase::learntoption, 
                  "Bias of output layer for distributed representation predictor.\n");

    inherited::declareOptions(ol);

}

///////////
// build //
///////////
void FeatureSetSequentialCRF::build()
{
    inherited::build();
    build_();
}


////////////
// build_ //
////////////
void FeatureSetSequentialCRF::build_()
{
    // Don't do anything if we don't have a train_set:
    // it's the only thing that knows the inputsize, targetsize and weightsize.

    if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
    {
        if(targetsize_ != 1)
            PLERROR("In FeatureSetSequentialCRF::build_(): targetsize_ must be 1, not %d",targetsize_);

        n_feat_sets = feat_sets.length();

        if(n_feat_sets == 0)
            PLERROR("In FeatureSetSequentialCRF::build_(): at least one FeatureSet must be provided\n");
        
        if(inputsize_ % n_feat_sets != 0)
            PLERROR("In FeatureSetSequentialCRF::build_(): feat_sets.length() must be a divisor of inputsize()");
        
        // Process penalty type option
        string pt = lowerstring( penalty_type );
        if( pt == "l1" )
            penalty_type = "L1";
        else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
            penalty_type = "L2_square";
        else if( pt == "l2" )
        {
            PLWARNING("In FeatureSetSequentialCRF::build_(): L2 penalty not supported, assuming you want L2 square");
            penalty_type = "L2_square";
        }
        else
            PLERROR("In FeatureSetSequentialCRF::build_(): penalty_type \"%s\" not supported", penalty_type.c_str());
        
        int ncosts = cost_funcs.size();
        if(ncosts<=0)
            PLERROR("In FeatureSetSequentialCRF::build_(): Empty cost_funcs : must at least specify the cost function to optimize!");
        
        if(stage <= 0) // Training hasn't started
        {
            // Initialize parameters
            initializeParams();
        }
        
        output_comp.resize(total_output_size);
        row.resize(train_set->width());
        row.fill(MISSING_VALUE);
        feats.resize(inputsize_);
        // Making sure that all feats[i] have non-null storage...
        for(int i=0; i<feats.length(); i++)
        {
            feats[i].resize(1);
            feats[i].resize(0);
        }
        if(fixed_output_weights && stochastic_gradient_descent_speedup)
            PLERROR("In FeatureSetSequentialCRF::build_(): cannot use stochastic gradient descent speedup with fixed output weights");
        val_string_reference_set = train_set;
        target_values_reference_set = train_set;
    }
}

void FeatureSetSequentialCRF::fprop(const Vec& inputv, Vec& outputv, const Vec& targetv, Vec& costsv, real sampleweight) const
{
    fpropOutput(inputv,outputv);
    fpropCostsFromOutput(inputv, outputv, targetv, costsv, sampleweight);
}

void FeatureSetSequentialCRF::fpropOutput(const Vec& inputv, Vec& outputv) const
{
    // Get possible target values
    if(possible_targets_vary) 
    {
        row.subVec(0,inputsize_) << inputv;
        target_values_reference_set->getValues(row,inputsize_,target_values);
        outputv.resize(target_values.length());
    }

    // Get features
    ni = inputsize_;
    nfeats = 0;
    for(int i=0; i<ni; i++)
    {
        str = val_string_reference_set->getValString(i,inputv[i]);
        feat_sets[i%n_feat_sets]->getFeatures(str,feats[i]);
        nfeats += feats[i].length();
    }

    feat_input.resize(nfeats);
    offset = 0;
    id = 0;
    for(int i=0; i<ni; i++)
    {
        f = feats[i].data();
        nj = feats[i].length();
        for(int j=0; j<nj; j++)
            feat_input[id++] = offset + *f++;
        if(dist_rep_dim <= 0 || ((i+1) % n_feat_sets != 0))
            offset += feat_sets[i % n_feat_sets]->size();
        else
            offset = 0;
    }

    // Fprop to output
    if(dist_rep_dim > 0) // x -> d(x)
    {
        nfeats = 0;
        id = 0;
        for(int i=0; i<inputsize_;)
        {
            ifeats = 0;
            for(int j=0; j<n_feat_sets; j++,i++)
                ifeats += feats[i].length();

            add_affine_transform(feat_input.subVec(nfeats,ifeats),
                                 wout_dist_rep, bout_dist_rep,
                                 nnet_input.subVec(id*dist_rep_dim,dist_rep_dim),
                                 true, false);
            nfeats += ifeats;
            id++;
        }

        if(nhidden>0) // d(x) -> h1(d(x))
        {
            add_affine_transform(nnet_input,w1,b1,hiddenv,false,false);
            add_transfer_func(hiddenv);

            if(nhidden2>0) // h1(d(x)) -> h2(h1(d(x)))
            {
                add_affine_transform(hiddenv,w2,b2,hidden2v,false,false);
                add_transfer_func(hidden2v);
                last_layer = hidden2v;
            }
            else
                last_layer = hiddenv;
        }
        else
            last_layer = nnet_input;

        // d(x),h1(d(x)),h2(h1(d(x))) -> o(x)

        add_affine_transform(last_layer,wout,bout,outputv,false,
                             possible_targets_vary,target_values);
        if(direct_in_to_out && nhidden>0)
            add_affine_transform(nnet_input,direct_wout,direct_bout,
                                 outputv,false,possible_targets_vary,target_values);
    }
    else
    {
        if(nhidden>0) // x -> h1(x)
        {
            add_affine_transform(feat_input,w1,b1,hiddenv,true,false);
            // Transfer function
            add_transfer_func(hiddenv);

            if(nhidden2>0) // h1(x) -> h2(h1(x))
            {
                add_affine_transform(hiddenv,w2,b2,hidden2v,true,false);
                add_transfer_func(hidden2v);
                last_layer = hidden2v;
            }
            else
                last_layer = hiddenv;
        }
        else
            last_layer = feat_input;
        // x, h1(x),h2(h1(x)) -> o(x)
        add_affine_transform(last_layer,wout,bout,outputv,nhidden<=0,
                             possible_targets_vary,target_values);
        if(direct_in_to_out && nhidden>0)
            add_affine_transform(feat_input,direct_wout,direct_bout,
                                 outputv,true,possible_targets_vary,target_values);
    }

    if (nhidden2>0 && nhidden<=0)
        PLERROR("FeatureSetSequentialCRF::fprop(): can't have nhidden2 (=%d) > 0 while nhidden=0",nhidden2);

    if(output_transfer_func!="" && output_transfer_func!="none")
        add_transfer_func(outputv, output_transfer_func);
}
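
// A note (added) on the sparse encoding built above: feat_input holds
// integer feature indices, where a feature from feat_sets[k] is shifted by
// the sizes of the feature sets preceding it in the current group of
// n_feat_sets input fields. For instance, with two feature sets of sizes 10
// and 5, feature 3 of the second set is encoded as 10 + 3 = 13, so every
// active feature selects a distinct row of the first weight matrix
// (wout_dist_rep or w1).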

void FeatureSetSequentialCRF::fpropCostsFromOutput(const Vec& inputv, const Vec& outputv, const Vec& targetv, Vec& costsv, real sampleweight) const
{
    // Compute cost

    if(possible_targets_vary)
    {
        reind_target = target_values.find(targetv[0]);
        if(reind_target<0)
            PLERROR("In FeatureSetSequentialCRF::fprop(): target %d is not in possible targets", (int)targetv[0]);
    }
    else
        reind_target = (int)targetv[0];

    // Build cost function

    int ncosts = cost_funcs.size();
    for(int k=0; k<ncosts; k++)
    {
        if(cost_funcs[k]=="NLL") 
        {
            costsv[k] = sampleweight*nll(outputv,reind_target);
        }
        else if(cost_funcs[k]=="class_error")
            costsv[k] = sampleweight*classification_loss(outputv, reind_target);
        else 
            PLERROR("In FeatureSetSequentialCRF::fprop(): unknown cost_func option: %s",cost_funcs[k].c_str());
    }
}
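
// For reference (an assumption based on the helpers' names, not something
// stated in this file): with outputv holding class probabilities p, these
// costs should amount to
//     NLL         : -sampleweight * log p[reind_target]
//     class_error :  sampleweight * (argmax(p) == reind_target ? 0 : 1)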

void FeatureSetSequentialCRF::bprop(Vec& inputv, Vec& outputv, Vec& targetv, Vec& costsv, real learning_rate, real sampleweight)
{
    if(possible_targets_vary) 
    {
        gradient_outputv.resize(target_values.length());
        gradient_act_outputv.resize(target_values.length());
        if(!stochastic_gradient_descent_speedup)
            target_values_since_last_update.append(target_values);
    }

    if(!stochastic_gradient_descent_speedup)
        feats_since_last_update.append(feat_input);

    // Gradient through cost
    if(cost_funcs[0]=="NLL") 
    {
        // Avoids numerical precision errors
        if(output_transfer_func == "softmax")
            gradient_outputv[reind_target] = learning_rate*sampleweight;
        else
            gradient_outputv[reind_target] = learning_rate*sampleweight/(outputv[reind_target]);
    }
    else if(cost_funcs[0]=="class_error")
    {
        PLERROR("FeatureSetSequentialCRF::bprop(): gradient cannot be computed for \"class_error\" cost");
    }

    // Gradient through output transfer function
    if(output_transfer_func != "linear")
    {
        if(cost_funcs[0]=="NLL" && output_transfer_func == "softmax")
            gradient_transfer_func(outputv,gradient_act_outputv, gradient_outputv,
                                   output_transfer_func, reind_target);
        else
            gradient_transfer_func(outputv,gradient_act_outputv, gradient_outputv,
                                   output_transfer_func);
        gradient_last_layer = gradient_act_outputv;
    }
    else
        gradient_last_layer = gradient_act_outputv;

    // Gradient through output affine transform


    if(nhidden2 > 0) {
        gradient_affine_transform(hidden2v, wout, bout, gradient_hidden2v, 
                                  gradient_wout, gradient_bout, gradient_last_layer,
                                  false, possible_targets_vary, learning_rate, 
                                  weight_decay+output_layer_weight_decay,
                                  bias_decay+output_layer_bias_decay,
                                  target_values);
    }
    else if(nhidden > 0) 
    {
        gradient_affine_transform(hiddenv, wout, bout, gradient_hiddenv,
                                  gradient_wout, gradient_bout, gradient_last_layer,
                                  false, possible_targets_vary, learning_rate, 
                                  weight_decay+output_layer_weight_decay,
                                  bias_decay+output_layer_bias_decay, target_values);
    }
    else
    {
        gradient_affine_transform(nnet_input, wout, bout, gradient_nnet_input, 
                                  gradient_wout, gradient_bout, gradient_last_layer,
                                  (dist_rep_dim <= 0), possible_targets_vary, learning_rate, 
                                  weight_decay+output_layer_weight_decay,
                                  bias_decay+output_layer_bias_decay, target_values);
    }


    if(nhidden2 > 0)
    {
        gradient_transfer_func(hidden2v,gradient_act_hidden2v,gradient_hidden2v);
        gradient_affine_transform(hiddenv, w2, b2, gradient_hiddenv, 
                                  gradient_w2, gradient_b2, gradient_act_hidden2v,
                                  false, false,learning_rate, 
                                  weight_decay+layer2_weight_decay,
                                  bias_decay+layer2_bias_decay);
    }
    if(nhidden > 0)
    {
        gradient_transfer_func(hiddenv,gradient_act_hiddenv,gradient_hiddenv);
        gradient_affine_transform(nnet_input, w1, b1, gradient_nnet_input, 
                                  gradient_w1, gradient_b1, gradient_act_hiddenv,
                                  dist_rep_dim<=0, false,learning_rate, 
                                  weight_decay+layer1_weight_decay,
                                  bias_decay+layer1_bias_decay);
    }

    if(nhidden>0 && direct_in_to_out)
    {
        gradient_affine_transform(nnet_input, direct_wout, direct_bout,
                                  gradient_nnet_input, 
                                  gradient_direct_wout, gradient_direct_bout,
                                  gradient_last_layer,
                                  dist_rep_dim<=0, possible_targets_vary,learning_rate, 
                                  weight_decay+direct_in_to_out_weight_decay,
                                  0,
                                  target_values);
    }

    if(dist_rep_dim > 0)
    {
        nfeats = 0;
        id = 0;
        for(int i=0; i<inputsize_; )
        {
            ifeats = 0;
            for(int j=0; j<n_feat_sets; j++,i++)
                ifeats += feats[i].length();
            gradient_affine_transform(feat_input.subVec(nfeats,ifeats),
                                      wout_dist_rep, bout_dist_rep,
                                      gradient_feat_input, // unused anyway, since the input is sparse
                                      gradient_wout_dist_rep,
                                      gradient_bout_dist_rep,
                                      gradient_nnet_input.subVec(id*dist_rep_dim,dist_rep_dim),
                                      true, false, learning_rate, 
                                      weight_decay+output_layer_dist_rep_weight_decay,
                                      bias_decay+output_layer_dist_rep_bias_decay);
            nfeats += ifeats;
            id++;
        }
    }
    clearProppathGradient();
}

void FeatureSetSequentialCRF::update()
{

    if(dist_rep_dim > 0)
    {
        update_affine_transform(feats_since_last_update, wout_dist_rep, 
                                bout_dist_rep, gradient_wout_dist_rep,
                                gradient_bout_dist_rep, true, false,
                                target_values_since_last_update);
    }

    if(nhidden>0) 
    {
        update_affine_transform(feats_since_last_update, w1, b1, 
                                gradient_w1, gradient_b1,
                                dist_rep_dim<=0, false,
                                target_values_since_last_update);
        if(nhidden2>0) 
        {
            update_affine_transform(feats_since_last_update, w2, b2, 
                                    gradient_w2, gradient_b2,
                                    false, false,
                                    target_values_since_last_update);
        }

        update_affine_transform(feats_since_last_update, wout, bout, 
                                gradient_wout, gradient_bout,
                                false, possible_targets_vary,
                                target_values_since_last_update);
        if(direct_in_to_out)
        {
            update_affine_transform(feats_since_last_update, direct_wout, 
                                    direct_bout, 
                                    gradient_direct_wout, gradient_direct_bout,
                                    false, possible_targets_vary,
                                    target_values_since_last_update);
        }
    }
    else
    {
        update_affine_transform(feats_since_last_update, wout, bout, 
                                gradient_wout, gradient_bout,
                                dist_rep_dim<=0, possible_targets_vary,
                                target_values_since_last_update);
    }

    feats_since_last_update.resize(0);
    target_values_since_last_update.resize(0);
}

void FeatureSetSequentialCRF::update_affine_transform(
    Vec input, Mat weights, Vec bias,
    Mat gweights, Vec gbias,
    bool input_is_sparse, bool output_is_sparse,
    Vec output_indices)
{
    // Bias
    if(bias.length() != 0)
    {
        if(output_is_sparse)
        {
            pval1 = gbias.data();
            pval2 = bias.data();
            pval3 = output_indices.data();
            ni = output_indices.length();
            for(int i=0; i<ni; i++)
            {
                pval2[(int)*pval3] += pval1[(int)*pval3];
                pval1[(int)*pval3] = 0;
                pval3++;
            }
        }
        else
        {
            pval1 = gbias.data();
            pval2 = bias.data();
            ni = bias.length();
            for(int i=0; i<ni; i++)
            {
                *pval2 += *pval1;
                *pval1 = 0;
                pval1++; 
                pval2++;
            }
        }
    }

    // Weights
    if(!input_is_sparse && !output_is_sparse)
    {
        if(!gweights.isCompact() || !weights.isCompact())
            PLERROR("In FeatureSetSequentialCRF::update_affine_transform(): weights or gweights is not a compact TMat");
        ni = weights.length();
        nj = weights.width();
        pval1 = gweights.data();
        pval2 = weights.data();
        for(int i=0; i<ni; i++)
            for(int j=0; j<nj; j++)
            {
                *pval2 += *pval1;
                *pval1 = 0;
                pval1++;
                pval2++;
            }
    }
    else if(!input_is_sparse && output_is_sparse)
    {
        ni = output_indices.length();
        nj = input.length();
        pval3 = output_indices.data();
        for(int i=0; i<ni; i++)
        {
            for(int j=0; j<nj; j++)
            {
                weights(j,(int)*pval3) += gweights(j,(int)*pval3);
                gweights(j,(int)*pval3) = 0;
            }
            pval3++;
        }
    }
    else if(input_is_sparse && !output_is_sparse)
    {
        ni = input.length();
        nj = weights.width();
        pval3 = input.data();
        for(int i=0; i<ni; i++)
        {
            pval1 = gweights[(int)(*pval3)];
            pval2 = weights[(int)(*pval3++)];
            for(int j=0; j<nj; j++)
            {
                *pval2 += *pval1;
                *pval1 = 0;
                pval1++;
                pval2++;
            }
        }
    }
    else if(input_is_sparse && output_is_sparse)
    {
        // Weights
        ni = input.length();
        nj = output_indices.length();
        pval2 = input.data();
        for(int i=0; i<ni; i++)
        {
            pval3 = output_indices.data();
            for(int j=0; j<nj; j++)
            {
                weights((int)(*pval2),(int)*pval3) += gweights((int)(*pval2),(int)*pval3);
                gweights((int)(*pval2),(int)*pval3) = 0;
                pval3++;
            }
            pval2++;
        }
    }
}
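
// A note (added) on the deferred update scheme used when
// stochastic_gradient_descent_speedup is false: bprop() only accumulates
// learning-rate-scaled steps in the gradient_* buffers (and remembers which
// rows/columns were touched via feats_since_last_update and
// target_values_since_last_update); update() then folds the accumulated
// steps into the parameters and resets the buffers. A sketch of the call
// pattern for one minibatch of batch_size samples:
//
//     for each (input, target) in the minibatch:
//         fprop(input, output, target, costs, 1.0);
//         bprop(input, output, target, costs, lr, 1.0);
//     update();  // parameters += accumulated steps; buffers cleared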

void FeatureSetSequentialCRF::clearProppathGradient()
{
    // Trick to make clearProppathGradient faster...
    if(cost_funcs[0]=="NLL") 
        gradient_outputv[reind_target] = 0;
    else
        gradient_outputv.clear();
    gradient_act_outputv.clear();

    if(dist_rep_dim>0)
        gradient_nnet_input.clear();

    if(nhidden>0) 
    {
        gradient_hiddenv.clear();
        gradient_act_hiddenv.clear();
        if(nhidden2>0) 
        {
            gradient_hidden2v.clear();
            gradient_act_hidden2v.clear();
        }
    }
}


/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void FeatureSetSequentialCRF::computeCostsFromOutputs(const Vec& inputv, const Vec& outputv, 
                                   const Vec& targetv, Vec& costsv) const
{
    PLERROR("In FeatureSetSequentialCRF::computeCostsFromOutputs(): output is not enough to compute costs");
}

int FeatureSetSequentialCRF::my_argmax(const Vec& vec, int default_compare) const
{
#ifdef BOUNDCHECK
    if(vec.length()==0)
        PLERROR("In FeatureSetSequentialCRF::my_argmax(): vec has zero length");
#endif
    real* v = vec.data();
    int indexmax = default_compare;
    real maxval = v[default_compare];
    for(int i=0; i<vec.length(); i++)
        if(v[i]>maxval)
        {
            maxval = v[i];
            indexmax = i;
        }
    return indexmax;
}

///////////////////
// computeOutput //
///////////////////
void FeatureSetSequentialCRF::computeOutput(const Vec& inputv, Vec& outputv) const
{
    fpropOutput(inputv, output_comp);
    if(possible_targets_vary)
    {
        //row.subVec(0,inputsize_) << inputv;
        //target_values_reference_set->getValues(row,inputsize_,target_values);
        outputv[0] = target_values[my_argmax(output_comp,rgen->uniform_multinomial_sample(output_comp.length()))];
    }
    else
        outputv[0] = argmax(output_comp);
}
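
// Note (added): passing a uniformly sampled index to my_argmax() as its
// default_compare argument randomizes which output wins when several
// outputs are exactly tied at the maximum (the sampled index wins if it is
// itself maximal; otherwise the first strictly larger output wins).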

///////////////////////////
// computeOutputAndCosts //
///////////////////////////
void FeatureSetSequentialCRF::computeOutputAndCosts(const Vec& inputv, const Vec& targetv, 
                                 Vec& outputv, Vec& costsv) const
{
    fprop(inputv,output_comp,targetv,costsv);
    if(possible_targets_vary)
    {
        //row.subVec(0,inputsize_) << inputv;
        //target_values_reference_set->getValues(row,inputsize_,target_values);
        outputv[0] = target_values[my_argmax(output_comp,rgen->uniform_multinomial_sample(output_comp.length()))];
    }
    else
        outputv[0] = argmax(output_comp);
}

/////////////////
// fillWeights //
/////////////////
void FeatureSetSequentialCRF::fillWeights(const Mat& weights) {
    if (initialization_method == "zero") {
        weights.clear();
        return;
    }
    real delta;
    int is = weights.length();
    if (initialization_method.find("linear") != string::npos)
        delta = 1.0 / real(is);
    else
        delta = 1.0 / sqrt(real(is));
    if (initialization_method.find("normal") != string::npos)
        rgen->fill_random_normal(weights, 0, delta);
    else
        rgen->fill_random_uniform(weights, -delta, delta);
}
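
// For example: with initialization_method "uniform_linear" and a weight
// matrix of length (number of rows) 100, delta = 0.01 and entries are drawn
// uniformly from [-0.01, 0.01]; "uniform_sqrt" gives delta = 0.1 instead.
// (delta is the scale actually passed to the random fill; see the
// initialization_method option text for the intended laws.)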

////////////
// forget //
////////////
void FeatureSetSequentialCRF::forget()
{
    if (train_set) build();
    total_updates=0;
    stage = 0;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> FeatureSetSequentialCRF::getTrainCostNames() const
{
    return cost_funcs;
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> FeatureSetSequentialCRF::getTestCostNames() const
{ 
    return cost_funcs;
}

///////////////////////
// add_transfer_func //
///////////////////////
void FeatureSetSequentialCRF::add_transfer_func(const Vec& input, string transfer_func) const
{
    if (transfer_func == "default")
        transfer_func = hidden_transfer_func;
    if(transfer_func=="linear")
        return;
    else if(transfer_func=="tanh")
    {
        compute_tanh(input,input);
        return;
    }
    else if(transfer_func=="sigmoid")
    {
        compute_sigmoid(input,input);
        return;
    }
    else if(transfer_func=="softmax")
    {
        compute_softmax(input,input);
        return;
    }
    else PLERROR("In FeatureSetSequentialCRF::add_transfer_func(): Unknown value for transfer_func: %s",transfer_func.c_str());
}

////////////////////////////
// gradient_transfer_func //
////////////////////////////
void FeatureSetSequentialCRF::gradient_transfer_func(Vec& output, Vec& gradient_input, Vec& gradient_output, string transfer_func, int nll_softmax_speed_up_target) {
    if (transfer_func == "default")
        transfer_func = hidden_transfer_func;
    if(transfer_func=="linear")
    {
        pval1 = gradient_output.data();
        pval2 = gradient_input.data();
        ni = output.length();
        for(int i=0; i<ni; i++)
            *pval2++ += *pval1++;
        return;
    }
    else if(transfer_func=="tanh")
    {
        pval1 = gradient_output.data();
        pval2 = output.data();
        pval3 = gradient_input.data();
        ni = output.length();
        for(int i=0; i<ni; i++)
            *pval3++ += (*pval1++)*(1.0-square(*pval2++));
        return;
    }
    else if(transfer_func=="sigmoid")
    {
        pval1 = gradient_output.data();
        pval2 = output.data();
        pval3 = gradient_input.data();
        ni = output.length();
        for(int i=0; i<ni; i++)
        {
            *pval3++ += (*pval1++)*(*pval2)*(1.0-*pval2);
            pval2++;
        }
        return;
    }
    else if(transfer_func=="softmax")
    {
        if(nll_softmax_speed_up_target<0)
        {
            pval3 = gradient_input.data();
            ni = nk = output.length();
            for(int i=0; i<ni; i++)
            {
                val = output[i];
                pval1 = gradient_output.data();
                pval2 = output.data();
                for(int k=0; k<nk; k++)
                    if(k!=i)
                        *pval3 -= *pval1++ * val * (*pval2++);
                    else
                    {
                        *pval3 += *pval1++ * val * (1.0-val);
                        pval2++;
                    }
                pval3++;
            }
        }
        else // Allows a speedup and avoids numerical precision errors
        {
            pval2 = output.data();
            pval3 = gradient_input.data();
            ni = output.length();
            grad = gradient_output[nll_softmax_speed_up_target];
            val = output[nll_softmax_speed_up_target];
            for(int i=0; i<ni; i++)
            {
                if(nll_softmax_speed_up_target!=i)
                    //*pval3++ -= grad * val * (*pval2++);
                    *pval3++ -= grad * (*pval2++);
                else
                {
                    //*pval3++ += grad * val * (1.0-val);
                    *pval3++ += grad * (1.0-val);
                    pval2++;
                }
            }
        }
        return;
    }
    else PLERROR("In FeatureSetSequentialCRF::gradient_transfer_func(): Unknown value for transfer_func: %s",transfer_func.c_str());
}
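
// A note (added) on the NLL + softmax speed-up branch, derived from the code
// above: with p = softmax(a) and target t, the NLL gradient w.r.t. the
// pre-softmax activations is dNLL/da_i = p_i - 1{i==t}. Since bprop()
// pre-loads gradient_output[t] with learning_rate*sampleweight, the branch
// accumulates the full signed step lr*sw*(1{i==t} - p_i) into gradient_input
// directly, skipping the softmax Jacobian product of the generic case.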

void FeatureSetSequentialCRF::add_affine_transform(Vec input, Mat weights, Vec bias, Vec output, 
                          bool input_is_sparse, bool output_is_sparse,
                          Vec output_indices) const
{
    // Bias
    if(bias.length() != 0)
    {
        if(output_is_sparse)
        {
            pval1 = output.data();
            pval2 = bias.data();
            pval3 = output_indices.data();
            ni = output.length();
            for(int i=0; i<ni; i++)
                *pval1++ = pval2[(int)*pval3++];
        }
        else
        {
            pval1 = output.data();
            pval2 = bias.data();
            ni = output.length();
            for(int i=0; i<ni; i++)
                *pval1++ = *pval2++;
        }
    }

    // Weights
    if(!input_is_sparse && !output_is_sparse)
    {
        transposeProductAcc(output,weights,input);
    }
    else if(!input_is_sparse && output_is_sparse)
    {
        ni = output.length();
        nj = input.length();
        pval1 = output.data();
        pval3 = output_indices.data();
        for(int i=0; i<ni; i++)
        {
            pval2 = input.data();
            for(int j=0; j<nj; j++)
                *pval1 += (*pval2++)*weights(j,(int)*pval3);
            pval1++;
            pval3++;
        }
    }
    else if(input_is_sparse && !output_is_sparse)
    {
        ni = input.length();
        nj = output.length();
        if(ni != 0)
        {
            pval3 = input.data();
            for(int i=0; i<ni; i++)
            {
                pval1 = output.data();
                pval2 = weights[(int)(*pval3++)];
                for(int j=0; j<nj; j++)
                    *pval1++ += *pval2++;
            }
        }
    }
    else if(input_is_sparse && output_is_sparse)
    {
        // Weights
        ni = input.length();
        nj = output.length();
        if(ni != 0)
        {
            pval2 = input.data();
            for(int i=0; i<ni; i++)
            {
                pval1 = output.data();
                pval3 = output_indices.data();
                for(int j=0; j<nj; j++)
                    *pval1++ += weights((int)(*pval2),(int)*pval3++);
                pval2++;
            }
        }
    }
}
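
// A note (added) on the sparse conventions used above: when input_is_sparse,
// `input` holds feature indices rather than values, so the matrix product
// reduces to summing the selected rows of `weights` (each active feature
// contributes its row with an implicit value of 1); when output_is_sparse,
// `output_indices` selects which columns of `weights` (and entries of
// `bias`) feed the restricted output vector.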

void FeatureSetSequentialCRF::gradient_affine_transform(Vec input, Mat weights, Vec bias, 
                                               Vec ginput, Mat gweights, Vec gbias,
                                               Vec goutput, bool input_is_sparse, 
                                               bool output_is_sparse,
                                               real learning_rate,
                                               real weight_decay, real bias_decay,
                                               Vec output_indices)
{
    // Bias
    if(bias.length() != 0)
    {
        if(output_is_sparse)
        {
            pval1 = gbias.data();
            pval2 = goutput.data();
            pval3 = output_indices.data();
            ni = goutput.length();

            if(fast_exact_is_equal(bias_decay, 0))
            {
                // Without bias decay
                for(int i=0; i<ni; i++)
                    pval1[(int)*pval3++] += *pval2++;
            }
            else
            {
                // With bias decay
                if(penalty_type == "L2_square")
                {
                    pval4 = bias.data();
                    val = -two(learning_rate)*bias_decay;
                    for(int i=0; i<ni; i++)
                    {
                        pval1[(int)*pval3] += *pval2++ + val*(pval4[(int)*pval3]);
                        pval3++;
                    }
                }
                else if(penalty_type == "L1")
                {
                    pval4 = bias.data();
                    val = -learning_rate*bias_decay;
                    for(int i=0; i<ni; i++)
                    {
                        val2 = pval4[(int)*pval3];
                        if(val2 > 0)
                            pval1[(int)*pval3] += *pval2 + val;
                        else if(val2 < 0)
                            pval1[(int)*pval3] += *pval2 - val;
                        pval2++;
                        pval3++;
                    }
                }
            }
        }
        else
        {
            pval1 = gbias.data();
            pval2 = goutput.data();
            ni = goutput.length();
            if(fast_exact_is_equal(bias_decay, 0))
            {
                // Without bias decay
                for(int i=0; i<ni; i++)
                    *pval1++ += *pval2++;
            }
            else
            {
                // With bias decay
                if(penalty_type == "L2_square")
                {
                    pval3 = bias.data();
                    val = -two(learning_rate)*bias_decay;
                    for(int i=0; i<ni; i++)
                    {
                        *pval1++ += *pval2++ + val * (*pval3++);
                    }
                }
                else if(penalty_type == "L1")
                {
                    pval3 = bias.data();
                    val = -learning_rate*bias_decay;
                    for(int i=0; i<ni; i++)
                    {
                        if(*pval3 > 0)
                            *pval1 += *pval2 + val;
                        else if(*pval3 < 0)
                            *pval1 += *pval2 - val;
                        pval1++;
                        pval2++;
                        pval3++;
                    }
                }
            }
        }
    }

    // Weights and input gradient (when appropriate)
    if(!input_is_sparse && !output_is_sparse)
    {
        // Equivalent to:
        //     productAcc(ginput, weights, goutput);          // input gradient
        //     externalProductAcc(gweights, input, goutput);  // weight gradient
        // but hand-rolled to limit the number of memory accesses.

        ni = input.length();
        nj = goutput.length();
        pval3 = ginput.data();
        pval5 = input.data();

        if(fast_exact_is_equal(weight_decay, 0))
        {
            // Without weight decay
            for(int i=0; i<ni; i++) {
                pval1 = goutput.data();
                pval2 = weights[i];
                pval4 = gweights[i];
                for(int j=0; j<nj; j++) {
                    *pval3 += *pval2 * (*pval1);
                    *pval4 += *pval5 * (*pval1);
                    pval1++;
                    pval2++;
                    pval4++;
                }
                pval3++;
                pval5++;
            }
        }
        else
        {
            // With weight decay
            if(penalty_type == "L2_square")
            {
                val = -two(learning_rate)*weight_decay;
                for(int i=0; i<ni; i++) {
                    pval1 = goutput.data();
                    pval2 = weights[i];
                    pval4 = gweights[i];
                    for(int j=0; j<nj; j++) {
                        *pval3 += *pval2 * (*pval1);
                        *pval4 += *pval5 * (*pval1) + val * (*pval2);
                        pval1++;
                        pval2++;
                        pval4++;
                    }
                    pval3++;
                    pval5++;
                }
            }
            else if(penalty_type == "L1")
            {
                val = -learning_rate*weight_decay;
                for(int i=0; i<ni; i++) {
                    pval1 = goutput.data();
                    pval2 = weights[i];
                    pval4 = gweights[i];
                    for(int j=0; j<nj; j++) {
                        *pval3 += *pval2 * (*pval1);
                        if(*pval2 > 0)
                            *pval4 += *pval5 * (*pval1) + val;
                        else if(*pval2 < 0)
                            *pval4 += *pval5 * (*pval1) - val;
                        pval1++;
                        pval2++;
                        pval4++;
                    }
                    pval3++;
                    pval5++;
                }
            }
        }
    }
    else if(!input_is_sparse && output_is_sparse)
    {
        ni = goutput.length();
        nj = input.length();
        pval1 = goutput.data();
        pval3 = output_indices.data();

        if(fast_exact_is_equal(weight_decay, 0))
        {
            // Without weight decay
            for(int i=0; i<ni; i++)
            {
                pval2 = input.data();
                pval4 = ginput.data();
                for(int j=0; j<nj; j++)
                {
                    // Input
                    *pval4++ += weights(j,(int)(*pval3))*(*pval1);
                    // Weights
                    gweights(j,(int)(*pval3)) += (*pval2++)*(*pval1);
                }
                pval1++;
                pval3++;
            }
        }
        else
        {
            // With weight decay
            if(penalty_type == "L2_square")
            {
                val = -two(learning_rate)*weight_decay;
                for(int i=0; i<ni; i++)
                {
                    pval2 = input.data();
                    pval4 = ginput.data();
                    for(int j=0; j<nj; j++)
                    {
                        val2 = weights(j,(int)(*pval3));
                        // Input
                        *pval4++ += val2*(*pval1);
                        // Weights
                        gweights(j,(int)(*pval3)) += (*pval2++)*(*pval1) + val*val2;
                    }
                    pval1++;
                    pval3++;
                }
            }
            else if(penalty_type == "L1")
            {
                val = -learning_rate*weight_decay;
                for(int i=0; i<ni; i++)
                {
                    pval2 = input.data();
                    pval4 = ginput.data();
                    for(int j=0; j<nj; j++)
                    {
                        val2 = weights(j,(int)(*pval3));
                        // Input
                        *pval4++ += val2*(*pval1);
                        // Weights
                        if(val2 > 0)
                            gweights(j,(int)(*pval3)) += (*pval2)*(*pval1) + val;
                        else if(val2 < 0)
                            gweights(j,(int)(*pval3)) += (*pval2)*(*pval1) - val;
                        pval2++;
                    }
                    pval1++;
                    pval3++;
                }
            }
        }
    }
01360     else if(input_is_sparse && !output_is_sparse)
01361     {
01362         ni = input.length();
01363         nj = goutput.length();
01364 
01365         if(fast_exact_is_equal(weight_decay, 0))
01366         {
01367             // Without weight decay
01368             if(ni != 0)
01369             {
01370                 pval3 = input.data();
01371                 for(int i=0; i<ni; i++)
01372                 {
01373                     pval1 = goutput.data();
01374                     pval2 = gweights[(int)(*pval3++)];
01375                     for(int j=0; j<nj;j++)
01376                         *pval2++ += *pval1++;
01377                 }
01378             }
01379         }
01380         else
01381         {
01382             // With weight decay
01383             if(penalty_type == "L2_square")
01384             {
01385                 if(ni != 0)
01386                 {
01387                     pval3 = input.data();                    
01388                     val = -two(learning_rate)*weight_decay;
01389                     for(int i=0; i<ni; i++)
01390                     {
01391                         pval1 = goutput.data();
01392                         pval2 = gweights[(int)(*pval3)];
01393                         pval4 = weights[(int)(*pval3++)];
01394                         for(int j=0; j<nj;j++)
01395                         {
01396                             *pval2++ += *pval1++ + val * (*pval4++);
01397                         }
01398                     }
01399                 }
01400             }
01401             else if(penalty_type == "L1")
01402             {
01403                 if(ni != 0)
01404                 {
01405                     pval3 = input.data();
01406                     val = -learning_rate*weight_decay; // negative, as in the other L1 branches: the update below adds val when the weight is positive
01407                     for(int i=0; i<ni; i++)
01408                     {
01409                         pval1 = goutput.data();
01410                         pval2 = gweights[(int)(*pval3)];
01411                         pval4 = weights[(int)(*pval3++)];
01412                         for(int j=0; j<nj;j++)
01413                         {
01414                             if(*pval4 > 0)
01415                                 *pval2 += *pval1 + val;
01416                             else if(*pval4 < 0)
01417                                 *pval2 += *pval1 - val;
01418                             pval1++;
01419                             pval2++;
01420                             pval4++;
01421                         }
01422                     }
01423                 }
01424             }
01425         }
01426     }
01427     else if(input_is_sparse && output_is_sparse)
01428     {
01429         ni = input.length();
01430         nj = goutput.length();
01431 
01432         if(fast_exact_is_equal(weight_decay, 0))
01433         {
01434             // Without weight decay
01435             if(ni != 0)
01436             {
01437                 pval2 = input.data();
01438                 for(int i=0; i<ni; i++)
01439                 {
01440                     pval1 = goutput.data();
01441                     pval3 = output_indices.data();
01442                     for(int j=0; j<nj; j++)
01443                         gweights((int)(*pval2),(int)*pval3++) += *pval1++;
01444                     pval2++;
01445                 }
01446             }
01447         }
01448         else
01449         {
01450             // With weight decay
01451             if(penalty_type == "L2_square")
01452             {
01453                 if(ni != 0)
01454                 {
01455                     pval2 = input.data();
01456                     val = -two(learning_rate)*weight_decay;                    
01457                     for(int i=0; i<ni; i++)
01458                     {
01459                         pval1 = goutput.data();
01460                         pval3 = output_indices.data();
01461                         for(int j=0; j<nj; j++)
01462                         {
01463                             gweights((int)(*pval2),(int)*pval3) 
01464                                 += *pval1++ 
01465                                 + val * weights((int)(*pval2),(int)*pval3);
01466                             pval3++;
01467                         }
01468                         pval2++;
01469                     }
01470                 }
01471             }
01472             else if(penalty_type == "L1")
01473             {
01474                 if(ni != 0)
01475                 {
01476                     pval2 = input.data();
01477                     val = -learning_rate*weight_decay;                    
01478                     for(int i=0; i<ni; i++)
01479                     {
01480                         pval1 = goutput.data();
01481                         pval3 = output_indices.data();
01482                         for(int j=0; j<nj; j++)
01483                         {
01484                             val2 = weights((int)(*pval2),(int)*pval3);
01485                             if(val2 > 0)
01486                                 gweights((int)(*pval2),(int)*pval3) 
01487                                     += *pval1 + val;
01488                             else if(val2 < 0)
01489                                 gweights((int)(*pval2),(int)*pval3) 
01490                                     += *pval1 - val;
01491                             pval1++;
01492                             pval3++;
01493                         }
01494                         pval2++;
01495                     }
01496                 }
01497             }
01498         }
01499     }
01500 
01501 //    gradient_penalty(input,weights,bias,gweights,gbias,input_is_sparse,output_is_sparse,
01502 //                     learning_rate,weight_decay,bias_decay,output_indices);
01503 }
01504 
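// --- Editor's sketch (not part of the class) -------------------------------
// Every branch above folds the weight-decay gradient into gweights while
// accumulating the backpropagated term. A minimal, self-contained sketch of
// just the decay terms on plain arrays (hypothetical names w, gw, lr,
// lambda), kept commented out so it stays out of the build:
//
//     void add_decay_sketch(double* w, double* gw, int n,
//                           double lr, double lambda, bool l1)
//     {
//         // "L2_square": penalty lambda*w^2  -> gradient term -2*lr*lambda*w
//         // "L1":        penalty lambda*|w|  -> gradient term -/+ lr*lambda
//         double val = -lr*lambda * (l1 ? 1.0 : 2.0);
//         for (int k = 0; k < n; k++) {
//             if (!l1)            gw[k] += val * w[k];
//             else if (w[k] > 0)  gw[k] += val;
//             else if (w[k] < 0)  gw[k] -= val;
//         }
//     }
//
// This matches the vals computed above: -two(learning_rate)*weight_decay for
// "L2_square" and -learning_rate*weight_decay for "L1".
// ----------------------------------------------------------------------------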
01505 void FeatureSetSequentialCRF::gradient_penalty(Vec input, Mat weights, Vec bias, 
01506                                   Mat gweights, Vec gbias,
01507                                   bool input_is_sparse, bool output_is_sparse,
01508                                   real learning_rate,
01509                                   real weight_decay, real bias_decay,
01510                                   Vec output_indices)
01511 {
01512     // Bias
01513     if(!fast_exact_is_equal(bias_decay, 0) && !fast_exact_is_equal(bias.length(), 0) )
01514     {
01515         if(output_is_sparse)
01516         {
01517             pval1 = gbias.data();
01518             pval2 = bias.data();
01519             pval3 = output_indices.data();
01520             ni = output_indices.length();            
01521             if(penalty_type == "L2_square")
01522             {
01523                 val = -two(learning_rate)*bias_decay;
01524                 for(int i=0; i<ni; i++)
01525                 {
01526                     pval1[(int)*pval3] += val*(pval2[(int)*pval3]);
01527                     pval3++;
01528                 }
01529             }
01530             else if(penalty_type == "L1")
01531             {
01532                 val = -learning_rate*bias_decay;
01533                 for(int i=0; i<ni; i++)
01534                 {
01535                     val2 = pval2[(int)*pval3];
01536                     if(val2 > 0 )
01537                         pval1[(int)*pval3++] += val;
01538                     else if(val2 < 0)
01539                         pval1[(int)*pval3++] -= val;
01540                 }
01541             }
01542         }
01543         else
01544         {
01545             pval1 = gbias.data();
01546             pval2 = bias.data();
01547             ni = bias.length(); // dense output: iterate the whole bias (output_indices is only meaningful in the sparse branch; compare verify_gradient_affine_transform below)
01548             if(penalty_type == "L2_square")
01549             {
01550                 val = -two(learning_rate)*bias_decay;
01551                 for(int i=0; i<ni; i++)
01552                     *pval1++ += val*(*pval2++);
01553             }
01554             else if(penalty_type == "L1")
01555             {
01556                 val = -learning_rate*bias_decay;
01557                 for(int i=0; i<ni; i++)
01558                 {
01559                     if(*pval2 > 0)
01560                         *pval1 += val;
01561                     else if(*pval2 < 0)
01562                         *pval1 -= val;
01563                     pval1++;
01564                     pval2++;
01565                 }
01566             }
01567         }
01568     }
01569 
01570     // Weights
01571     if(!fast_exact_is_equal(weight_decay, 0))
01572     {
01573         if(!input_is_sparse && !output_is_sparse)
01574         {      
01575             if(penalty_type == "L2_square")
01576             {
01577                 multiplyAcc(gweights, weights,-two(learning_rate)*weight_decay);
01578             }
01579             else if(penalty_type == "L1")
01580             {
01581                 val = -learning_rate*weight_decay;
01582                 if(gweights.isCompact() && weights.isCompact())
01583                 {
01584                     Mat::compact_iterator itm = gweights.compact_begin();
01585                     Mat::compact_iterator itmend = gweights.compact_end();
01586                     Mat::compact_iterator itx = weights.compact_begin();
01587                     for(; itm!=itmend; ++itm, ++itx)
01588                     {
01589                         if(*itx > 0)
01590                             *itm += val;
01591                         else if(*itx < 0)
01592                             *itm -= val;
01593                     }
01594                 }
01595                 else // use non-compact iterators
01596                 {
01597                     Mat::iterator itm = gweights.begin();
01598                     Mat::iterator itmend = gweights.end();
01599                     Mat::iterator itx = weights.begin();
01600                     for(; itm!=itmend; ++itm, ++itx)
01601                     {
01602                         if(*itx > 0)
01603                             *itm += val;
01604                         else if(*itx < 0)
01605                             *itm -= val;
01606                     }
01607                 }
01608             }
01609         }
01610         else if(!input_is_sparse && output_is_sparse)
01611         {
01612             ni = output_indices.length();
01613             nj = input.length();
01614             pval1 = output_indices.data();
01615 
01616             if(penalty_type == "L2_square")
01617             {
01618                 val = -two(learning_rate)*weight_decay;
01619                 for(int i=0; i<ni; i++)
01620                 {
01621                     for(int j=0; j<nj; j++)
01622                     {
01623                         gweights(j,(int)(*pval1)) += val * weights(j,(int)(*pval1));
01624                     }
01625                     pval1++;
01626                 }
01627             }
01628             else if(penalty_type == "L1")
01629             {
01630                 val = -learning_rate*weight_decay;
01631                 for(int i=0; i<ni; i++)
01632                 {
01633                     for(int j=0; j<nj; j++)
01634                     {
01635                         val2 = weights(j,(int)(*pval1));
01636                         if(val2 > 0)
01637                             gweights(j,(int)(*pval1)) +=  val;
01638                         else if(val2 < 0)
01639                             gweights(j,(int)(*pval1)) -=  val;
01640                     }
01641                     pval1++;
01642                 }
01643             }
01644         }
01645         else if(input_is_sparse && !output_is_sparse)
01646         {
01647             ni = input.length();
01648             nj = output_indices.length();
01649             if(ni != 0)
01650             {
01651                 pval3 = input.data();
01652                 if(penalty_type == "L2_square")
01653                 {
01654                     val = -two(learning_rate)*weight_decay;
01655                     for(int i=0; i<ni; i++)
01656                     {
01657                         pval1 = weights[(int)(*pval3)];
01658                         pval2 = gweights[(int)(*pval3++)];
01659                         for(int j=0; j<nj;j++)
01660                             *pval2++ += val * *pval1++;
01661                     }
01662                 }
01663                 else if(penalty_type == "L1")
01664                 {
01665                     val = -learning_rate*weight_decay;
01666                     for(int i=0; i<ni; i++)
01667                     {
01668                         pval1 = weights[(int)(*pval3)];
01669                         pval2 = gweights[(int)(*pval3++)];
01670                         for(int j=0; j<nj;j++)
01671                         {
01672                             if(*pval1 > 0)
01673                                 *pval2 += val;
01674                             else if(*pval1 < 0)
01675                                 *pval2 -= val;
01676                             pval2++;
01677                             pval1++;
01678                         }
01679                     }                
01680                 }
01681             }
01682         }
01683         else if(input_is_sparse && output_is_sparse)
01684         {
01685             ni = input.length();
01686             nj = output_indices.length();
01687             if(ni != 0)
01688             {
01689                 pval1 = input.data();
01690                 if(penalty_type == "L2_square")
01691                 {
01692                     val = -two(learning_rate)*weight_decay;
01693                     for(int i=0; i<ni; i++)
01694                     {
01695                         pval2 = output_indices.data();
01696                         for(int j=0; j<nj; j++)
01697                         {
01698                             gweights((int)(*pval1),(int)*pval2) += val*weights((int)(*pval1),(int)*pval2);
01699                             pval2++;
01700                         }
01701                         pval1++;
01702                     }
01703                 }
01704                 else if(penalty_type == "L1")
01705                 {
01706                     val = -learning_rate*weight_decay;
01707                     for(int i=0; i<ni; i++)
01708                     {
01709                         pval2 = output_indices.data();
01710                         for(int j=0; j<nj; j++)
01711                         {
01712                             val2 = weights((int)(*pval1),(int)*pval2);
01713                             if(val2 > 0)
01714                                 gweights((int)(*pval1),(int)*pval2) += val;
01715                             else if(val2 < 0)
01716                                 gweights((int)(*pval1),(int)*pval2) -= val;
01717                             pval2++;
01718                         }
01719                         pval1++;
01720                     }
01721                     
01722                 }
01723             }
01724         }
01725     }
01726 }
01727 
01728 void FeatureSetSequentialCRF::compute_softmax(const Vec& x, const Vec& y) const
01729 {
01730     int n = x.length();
01731     
01732 //    real* yp = y.data();
01733 //    real* xp = x.data();
01734 //    for(int i=0; i<n; i++)
01735 //    {
01736 //        *yp++ = *xp > 1e-5 ? *xp : 1e-5;
01737 //        xp++;
01738 //    }
01739 
01740     if (n>0)
01741     {
01742         real* yp = y.data();
01743         real* xp = x.data();
01744         real maxx = max(x);
01745         real s = 0;
01746         for (int i=0;i<n;i++)
01747             s += (*yp++ = safeexp(*xp++-maxx));
01748         if (s == 0) PLERROR("trying to divide by 0 in softmax");
01749         s = 1.0 / s;
01750         yp = y.data();
01751         for (int i=0;i<n;i++)
01752             *yp++ *= s;
01753     }
01754 }
01755 
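// --- Editor's sketch (not part of the class) -------------------------------
// compute_softmax() above uses the standard max-shift for numerical
// stability: softmax(x)_i = exp(x_i - m) / sum_j exp(x_j - m) with
// m = max_j x_j, mathematically identical to the unshifted form but keeping
// every exponential in (0,1]. The same idea on std::vector (assumes x is
// non-empty, as guarded above); kept commented out so it stays out of the
// build:
//
//     #include <algorithm>
//     #include <cmath>
//     #include <vector>
//
//     std::vector<double> softmax_sketch(const std::vector<double>& x)
//     {
//         double m = *std::max_element(x.begin(), x.end());
//         std::vector<double> y(x.size());
//         double s = 0;
//         for (std::size_t i = 0; i < x.size(); i++)
//             s += (y[i] = std::exp(x[i] - m));   // shifted exponentials
//         for (std::size_t i = 0; i < x.size(); i++)
//             y[i] /= s;                          // normalize: sums to 1
//         return y;
//     }
// ----------------------------------------------------------------------------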
01756 real FeatureSetSequentialCRF::nll(const Vec& outputv, int target) const
01757 {
01758     return -safeflog(outputv[target]);
01759 }
01760     
01761 real FeatureSetSequentialCRF::classification_loss(const Vec& outputv, int target) const
01762 {
01763     return (argmax(outputv) == target ? 0 : 1);
01764 }
01765 
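// --- Editor's worked example (hypothetical numbers) -------------------------
// For outputv = (0.2, 0.7, 0.1) and target = 1:
//     nll(outputv, 1)                 = -log(0.7) ~= 0.357
//     classification_loss(outputv, 1) = 0   (argmax is 1, the target)
// With target = 2 instead: nll = -log(0.1) ~= 2.303 and the 0-1 loss is 1.
// ----------------------------------------------------------------------------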
01766 void FeatureSetSequentialCRF::initializeParams(bool set_seed)
01767 {
01768     if (set_seed) {
01769         if (seed_>=0)
01770             rgen->manual_seed(seed_);
01771     }
01772 
01773 
01774     PP<Dictionary> dict = train_set->getDictionary(inputsize_);
01775     total_output_size = dict->size();
01776 
01777     total_feats_per_token = 0;
01778     for(int i=0; i<n_feat_sets; i++)
01779         total_feats_per_token += feat_sets[i]->size();
01780 
01781     int nnet_inputsize;
01782     if(dist_rep_dim > 0)
01783     {
01784         wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
01785         bout_dist_rep.resize(dist_rep_dim);
01786         nnet_inputsize = dist_rep_dim*inputsize_/n_feat_sets;
01787         nnet_input.resize(nnet_inputsize);
01788 
01789         fillWeights(wout_dist_rep);
01790         bout_dist_rep.clear();
01791 
01792         gradient_wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
01793         gradient_bout_dist_rep.resize(dist_rep_dim);
01794         gradient_nnet_input.resize(nnet_inputsize);
01795         gradient_wout_dist_rep.clear();
01796         gradient_bout_dist_rep.clear();
01797         gradient_nnet_input.clear();
01798     }
01799     else
01800     {
01801         nnet_inputsize = total_feats_per_token*inputsize_/n_feat_sets;
01802         nnet_input = feat_input;
01803     }
01804 
01805     if(nhidden>0) 
01806     {
01807         w1.resize(nnet_inputsize,nhidden);
01808         b1.resize(nhidden);
01809         hiddenv.resize(nhidden);
01810 
01811         fillWeights(w1);
01812         b1.clear();
01813 
01814         gradient_w1.resize(nnet_inputsize,nhidden);
01815         gradient_b1.resize(nhidden);
01816         gradient_hiddenv.resize(nhidden);
01817         gradient_act_hiddenv.resize(nhidden);
01818         gradient_w1.clear();
01819         gradient_b1.clear();
01820         gradient_hiddenv.clear();
01821         gradient_act_hiddenv.clear();
01822         if(nhidden2>0) 
01823         {
01824             w2.resize(nhidden,nhidden2);
01825             b2.resize(nhidden2);
01826             hidden2v.resize(nhidden2);
01827             wout.resize(nhidden2,total_output_size);
01828             bout.resize(total_output_size);
01829 
01830             fillWeights(w2);
01831             b2.clear();
01832 
01833             gradient_w2.resize(nhidden,nhidden2);
01834             gradient_b2.resize(nhidden2);
01835             gradient_hidden2v.resize(nhidden2);
01836             gradient_act_hidden2v.resize(nhidden2);
01837             gradient_wout.resize(nhidden2,total_output_size);
01838             gradient_bout.resize(total_output_size);
01839             gradient_w2.clear();
01840             gradient_b2.clear();
01841             gradient_hidden2v.clear();
01842             gradient_act_hidden2v.clear();
01843             gradient_wout.clear();
01844             gradient_bout.clear();
01845         }
01846         else
01847         {
01848             wout.resize(nhidden,total_output_size);
01849             bout.resize(total_output_size);
01850 
01851             gradient_wout.resize(nhidden,total_output_size);
01852             gradient_bout.resize(total_output_size);
01853             gradient_wout.clear();
01854             gradient_bout.clear();
01855         }
01856             
01857         if(direct_in_to_out)
01858         {
01859             direct_wout.resize(nnet_inputsize,total_output_size);
01860             direct_bout.resize(0); // Because it is not used
01861 
01862             fillWeights(direct_wout);
01863                 
01864             gradient_direct_wout.resize(nnet_inputsize,total_output_size);
01865             gradient_direct_wout.clear();
01866             gradient_direct_bout.resize(0); // idem
01867         }
01868     }
01869     else
01870     {
01871         wout.resize(nnet_inputsize,total_output_size);
01872         bout.resize(total_output_size);
01873 
01874         gradient_wout.resize(nnet_inputsize,total_output_size);
01875         gradient_bout.resize(total_output_size);
01876         gradient_wout.clear();
01877         gradient_bout.clear();
01878     }
01879 
01880     //fillWeights(wout);
01881     
01882     if (fixed_output_weights) {
01883         static Vec values;
01884         if (values.size()==0)
01885         {
01886             values.resize(2);
01887             values[0]=-1;
01888             values[1]=1;
01889         }
01890         rgen->fill_random_discrete(wout.toVec(), values);
01891     }
01892     else 
01893         fillWeights(wout);
01894 
01895     bout.clear();
01896 
01897     gradient_outputv.resize(total_output_size);
01898     gradient_act_outputv.resize(total_output_size);
01899     gradient_outputv.clear();
01900     gradient_act_outputv.clear();
01901 }
01902 
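// --- Editor's worked example (hypothetical option values) -------------------
// Sizing logic of initializeParams() above: with inputsize_ = 6 slots,
// n_feat_sets = 2 and dist_rep_dim = 10, the input holds 6/2 = 3 tokens,
// each mapped to a 10-dimensional distributed representation, so
//     nnet_inputsize = dist_rep_dim * inputsize_ / n_feat_sets = 10*6/2 = 30.
// w1 is then (30 x nhidden) and, with nhidden2 == 0, wout is
// (nhidden x total_output_size), where total_output_size is the size of the
// target Dictionary.
// ----------------------------------------------------------------------------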
01904 // makeDeepCopyFromShallowCopy //
01906 void FeatureSetSequentialCRF::makeDeepCopyFromShallowCopy(CopiesMap& copies)
01907 {
01908     inherited::makeDeepCopyFromShallowCopy(copies);
01909 
01910     // Private variables
01911     deepCopyField(target_values,copies);
01912     deepCopyField(output_comp,copies);
01913     deepCopyField(row,copies);
01914     deepCopyField(last_layer,copies);
01915     deepCopyField(gradient_last_layer,copies);
01916     deepCopyField(feats,copies);
01917     deepCopyField(gradient,copies);
01918 
01919     // Protected variables
01920     deepCopyField(feat_input,copies);
01921     deepCopyField(gradient_feat_input,copies);
01922     deepCopyField(nnet_input,copies);
01923     deepCopyField(gradient_nnet_input,copies);
01924     deepCopyField(hiddenv,copies);
01925     deepCopyField(gradient_hiddenv,copies);
01926     deepCopyField(gradient_act_hiddenv,copies);
01927     deepCopyField(hidden2v,copies);
01928     deepCopyField(gradient_hidden2v,copies);
01929     deepCopyField(gradient_act_hidden2v,copies);
01930     deepCopyField(gradient_outputv,copies);
01931     deepCopyField(gradient_act_outputv,copies);
01932     deepCopyField(feats_since_last_update,copies);
01933     deepCopyField(target_values_since_last_update,copies);
01934     deepCopyField(val_string_reference_set,copies);
01935     deepCopyField(target_values_reference_set,copies);
01936 
01937     // Public variables
01938     deepCopyField(w1,copies);
01939     deepCopyField(gradient_w1,copies);
01940     deepCopyField(b1,copies);
01941     deepCopyField(gradient_b1,copies);
01942     deepCopyField(w2,copies);
01943     deepCopyField(gradient_w2,copies);
01944     deepCopyField(b2,copies);
01945     deepCopyField(gradient_b2,copies);
01946     deepCopyField(wout,copies);
01947     deepCopyField(gradient_wout,copies);
01948     deepCopyField(bout,copies);
01949     deepCopyField(gradient_bout,copies);
01950     deepCopyField(direct_wout,copies);
01951     deepCopyField(gradient_direct_wout,copies);
01952     deepCopyField(direct_bout,copies);
01953     deepCopyField(gradient_direct_bout,copies);
01954     deepCopyField(wout_dist_rep,copies);
01955     deepCopyField(gradient_wout_dist_rep,copies);
01956     deepCopyField(bout_dist_rep,copies);
01957     deepCopyField(gradient_bout_dist_rep,copies);
01958 
01959     // Public build options
01960     deepCopyField(cost_funcs,copies);
01961     deepCopyField(feat_sets,copies);
01962 }
01963 
01965 // outputsize //
01967 int FeatureSetSequentialCRF::outputsize() const {
01968     return targetsize_;
01969 }
01970 
01972 // train //
01974 void FeatureSetSequentialCRF::train()
01975 {
01976     //Profiler::activate();
01977 //    if(!train_set)
01978 //        PLERROR("In FeatureSetSequentialCRF::train, you did not setTrainingSet");
01979 //
01980 //    if(!train_stats)
01981 //        PLERROR("In FeatureSetSequentialCRF::train, you did not setTrainStatsCollector");
01982 // 
01983 //    Vec outputv(total_output_size);
01984 //    Vec costsv(getTrainCostNames().length());
01985 //    Vec inputv(train_set->inputsize());
01986 //    Vec targetv(train_set->targetsize());
01987 //    real sample_weight=1;
01988 //
01989 //
01990 //    int l = train_set->length();  
01991 //    int bs = batch_size>0 ? batch_size : l;
01992 //
01993 //    PP<ProgressBar> pb;
01994 //    if(report_progress)
01995 //        pb = new ProgressBar("Training " + classname() + " from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
01996 
01997 //    Mat old_gradient_wout;
01998 //    Vec old_gradient_bout;
01999 //    Mat old_gradient_wout_dist_rep;
02000 //    Vec old_gradient_bout_dist_rep;
02001 //    Mat old_gradient_w1;
02002 //    Vec old_gradient_b1;
02003 //    Mat old_gradient_w2;
02004 //    Vec old_gradient_b2;
02005 //    Mat old_gradient_direct_wout;
02006 //
02007 //    if(stochastic_gradient_descent_speedup)
02008 //    {
02009 //        // Trick to make stochastic gradient descent faster
02010 //
02011 //        old_gradient_wout = gradient_wout;
02012 //        old_gradient_bout = gradient_bout;
02013 //        gradient_wout = wout;
02014 //        gradient_bout = bout;
02015 //        
02016 //        if(dist_rep_dim > 0)
02017 //        {
02018 //            old_gradient_wout_dist_rep = gradient_wout_dist_rep;
02019 //            old_gradient_bout_dist_rep = gradient_bout_dist_rep;
02020 //            gradient_wout_dist_rep = wout_dist_rep;
02021 //            gradient_bout_dist_rep = bout_dist_rep;
02022 //        }
02023 //
02024 //        if(nhidden>0) 
02025 //        {
02026 //            old_gradient_w1 = gradient_w1;
02027 //            old_gradient_b1 = gradient_b1;
02028 //            gradient_w1 = w1;
02029 //            gradient_b1 = b1;
02030 //            if(nhidden2>0) 
02031 //            {
02032 //                old_gradient_w2 = gradient_w2;
02033 //                old_gradient_b2 = gradient_b2;
02034 //                gradient_w2 = w2;
02035 //                gradient_b2 = b2;
02036 //            }
02037 //            
02038 //            if(direct_in_to_out)
02039 //            {
02040 //                old_gradient_direct_wout = gradient_direct_wout;
02041 //                gradient_direct_wout = direct_wout;
02042 //            }
02043 //        }
02044 //    }
02045 
02046     // TMat example
02047     // Mat blu(10,40);
02048     // blu.resize(3,6);
02049     // blu(2,3) <- element at position (2,3), counting from (0,0)
02050     // blu[1]   <- real* pointer to the 2nd row (row 1)
02051     // 
02052     // Note: you can nest TVec< TVec< TVec<... > > >
02053     
02054     // Assume these are defined...
02055     TVec<int> delimiters;
02056 
02057     int initial_stage = stage;
02058     while(stage<nstages)
02059     {
02060         for(int t=0; t<train_set->length(); t++) // 'l' is declared only in the commented block above; advance t so the empty stub loop terminates
02061         {
02062             
02063             // Alexounet: code the alpha-beta (forward-backward) recursions here
02064 
02065             // Update
02066             //if(!stochastic_gradient_descent_speedup)
02067             //    update();
02068             //total_updates++;
02069         }
02070 //        train_stats->finalize();
02071         ++stage; // uncommented so the while loop above terminates; the surrounding bookkeeping stays disabled
02072 //        if(verbosity>2)
02073 //            cout << "Epoch " << stage << " train objective: " 
02074 //                 << train_stats->getMean() << endl;
02075 //        if(pb) pb->update(stage-initial_stage);
02076     }
02077 
02078 //    if(stochastic_gradient_descent_speedup)
02079 //    {
02080 //        // Trick to make stochastic gradient descent faster
02081 //
02082 //        gradient_wout = old_gradient_wout;
02083 //        gradient_bout = old_gradient_bout;
02084 //        
02085 //        if(dist_rep_dim > 0)
02086 //        {
02087 //            gradient_wout_dist_rep = old_gradient_wout_dist_rep;
02088 //            gradient_bout_dist_rep = old_gradient_bout_dist_rep;
02089 //        }
02090 //
02091 //        if(nhidden>0) 
02092 //        {
02093 //            gradient_w1 = old_gradient_w1;
02094 //            gradient_b1 = old_gradient_b1;
02095 //            if(nhidden2>0) 
02096 //            {
02097 //                gradient_w2 = old_gradient_w2;
02098 //                gradient_b2 = old_gradient_b2;
02099 //            }
02100 //            
02101 //            if(direct_in_to_out)
02102 //            {
02103 //                gradient_direct_wout = old_gradient_direct_wout;
02104 //            }
02105 //        }
02106 //    }
02107     //Profiler::report(cout);
02108 }
02109 
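// --- Editor's sketch (not part of the class) -------------------------------
// train() above is an unfinished stub; the comment asks for the alpha-beta
// (forward-backward) recursions of the CRF. A minimal log-space forward
// recursion computing the log-partition function logZ of a linear-chain CRF,
// under assumptions not in this file (T >= 1 positions, n_states tags,
// emit[t][y] the unary score, trans[y0][y] the transition score); kept
// commented out so it stays out of the build:
//
//     #include <algorithm>
//     #include <cmath>
//     #include <vector>
//
//     double crf_log_partition_sketch(
//         int T, int n_states,
//         const std::vector<std::vector<double> >& emit,   // T x n_states
//         const std::vector<std::vector<double> >& trans)  // n_states x n_states
//     {
//         std::vector<double> alpha(emit[0]);              // alpha_0(y) = emit[0][y]
//         for (int t = 1; t < T; t++) {
//             std::vector<double> next(n_states);
//             for (int y = 0; y < n_states; y++) {
//                 // log-sum-exp over the previous tag, max-shifted for stability
//                 double m = alpha[0] + trans[0][y];
//                 for (int y0 = 1; y0 < n_states; y0++)
//                     m = std::max(m, alpha[y0] + trans[y0][y]);
//                 double s = 0;
//                 for (int y0 = 0; y0 < n_states; y0++)
//                     s += std::exp(alpha[y0] + trans[y0][y] - m);
//                 next[y] = emit[t][y] + m + std::log(s);
//             }
//             alpha.swap(next);
//         }
//         double m = *std::max_element(alpha.begin(), alpha.end());
//         double s = 0;
//         for (int y = 0; y < n_states; y++)
//             s += std::exp(alpha[y] - m);
//         return m + std::log(s);                          // logZ
//     }
//
// The backward (beta) recursion is symmetric, and per-position marginals are
// exp(alpha_t(y) + beta_t(y) - logZ).
// ----------------------------------------------------------------------------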
02110 void FeatureSetSequentialCRF::verify_gradient(Vec& input, Vec targetv, real step)
02111 {
02112     Vec costsv(getTrainCostNames().length());
02113     real sampleweight = 1;
02114     real verify_step = step;
02115     
02116     // To avoid the interaction between fprop and this function
02117     int nfeats = 0;
02118     int id = 0;
02119     int ifeats = 0;
02120 
02121     Vec est_gradient_bout;
02122     Mat est_gradient_wout;
02123     Vec est_gradient_bout_dist_rep;
02124     Mat est_gradient_wout_dist_rep;
02125     Vec est_gradient_b1;
02126     Mat est_gradient_w1;
02127     Vec est_gradient_b2;
02128     Mat est_gradient_w2;
02129     Vec est_gradient_direct_bout;
02130     Mat est_gradient_direct_wout;
02131 
02132     int nnet_inputsize;
02133     if(dist_rep_dim > 0)
02134     {
02135         nnet_inputsize = dist_rep_dim*inputsize_/n_feat_sets;
02136         est_gradient_wout_dist_rep.resize(total_feats_per_token,dist_rep_dim);
02137         est_gradient_bout_dist_rep.resize(dist_rep_dim);
02138         est_gradient_wout_dist_rep.clear();
02139         est_gradient_bout_dist_rep.clear();
02140         gradient_wout_dist_rep.clear();
02141         gradient_bout_dist_rep.clear();
02142     }
02143     else
02144     {
02145         nnet_inputsize = total_feats_per_token*inputsize_/n_feat_sets;
02146     }
02147 
02148     if(nhidden>0) 
02149     {
02150         est_gradient_w1.resize(nnet_inputsize,nhidden);
02151         est_gradient_b1.resize(nhidden);
02152         est_gradient_w1.clear();
02153         est_gradient_b1.clear();
02154         gradient_w1.clear();
02155         gradient_b1.clear();
02156         if(nhidden2>0) 
02157         {
02158             est_gradient_w2.resize(nhidden,nhidden2);
02159             est_gradient_b2.resize(nhidden2);
02160             est_gradient_wout.resize(nhidden2,total_output_size);
02161             est_gradient_bout.resize(total_output_size);
02162             est_gradient_w2.clear();
02163             est_gradient_b2.clear();
02164             est_gradient_wout.clear();
02165             est_gradient_bout.clear();
02166             gradient_w2.clear();
02167             gradient_b2.clear();
02168             gradient_wout.clear();
02169             gradient_bout.clear();
02170         }
02171         else
02172         {
02173             est_gradient_wout.resize(nhidden,total_output_size);
02174             est_gradient_bout.resize(total_output_size);
02175             est_gradient_wout.clear();
02176             est_gradient_bout.clear();
02177             gradient_wout.clear();
02178             gradient_bout.clear();
02179         }
02180             
02181         if(direct_in_to_out)
02182         {
02183             est_gradient_direct_wout.resize(nnet_inputsize,total_output_size);
02184             est_gradient_direct_wout.clear();
02185             est_gradient_direct_bout.resize(0); // idem
02186             gradient_direct_wout.clear();                        
02187         }
02188     }
02189     else
02190     {
02191         est_gradient_wout.resize(nnet_inputsize,total_output_size);
02192         est_gradient_bout.resize(total_output_size);
02193         est_gradient_wout.clear();
02194         est_gradient_bout.clear();
02195         gradient_wout.clear();
02196         gradient_bout.clear();
02197     }
02198 
02199     fprop(input, output_comp, targetv, costsv);
02200     bprop(input,output_comp,targetv,costsv,
02201           -1, sampleweight);
02202     clearProppathGradient();
02203     
02204     // Compute estimated gradient
02205 
02206     if(dist_rep_dim > 0) 
02207     {        
02208         nfeats = 0;
02209         id = 0;
02210         for(int i=0; i<inputsize_;)
02211         {
02212             ifeats = 0;
02213             for(int j=0; j<n_feat_sets; j++,i++)
02214                 ifeats += feats[i].length();
02215             verify_gradient_affine_transform(
02216                 input,output_comp, targetv, costsv, sampleweight,
02217                 feat_input.subVec(nfeats,ifeats),
02218                 wout_dist_rep, bout_dist_rep,
02219                 est_gradient_wout_dist_rep, est_gradient_bout_dist_rep,
02220                 true, false, verify_step);
02221             nfeats += ifeats;
02222             id++;
02223         }
02224 
02225         cout << "Verify wout_dist_rep" << endl;
02226         output_gradient_verification(gradient_wout_dist_rep.toVec(), est_gradient_wout_dist_rep.toVec());
02227         cout << "Verify bout_dist_rep" << endl;
02228         output_gradient_verification(gradient_bout_dist_rep, est_gradient_bout_dist_rep);
02229         gradient_wout_dist_rep.clear();
02230         gradient_bout_dist_rep.clear();
02231 
02232         if(nhidden>0) 
02233         {
02234             verify_gradient_affine_transform(
02235                 input,output_comp, targetv, costsv, sampleweight,
02236                 nnet_input,w1,b1,
02237                 est_gradient_w1, est_gradient_b1, false,false, verify_step);
02238 
02239             cout << "Verify w1" << endl;
02240             output_gradient_verification(gradient_w1.toVec(), est_gradient_w1.toVec());
02241             cout << "Verify b1" << endl;
02242             output_gradient_verification(gradient_b1, est_gradient_b1);
02243             
02244             if(nhidden2>0) 
02245             {
02246                 verify_gradient_affine_transform(
02247                     input,output_comp, targetv, costsv, sampleweight,    
02248                     hiddenv,w2,b2,
02249                     est_gradient_w2, est_gradient_b2,
02250                     false,false, verify_step);
02251                 cout << "Verify w2" << endl;
02252                 output_gradient_verification(gradient_w2.toVec(), est_gradient_w2.toVec());
02253                 cout << "Verify b2" << endl;
02254                 output_gradient_verification(gradient_b2, est_gradient_b2);
02255 
02256                 last_layer = hidden2v;
02257             }
02258             else
02259                 last_layer = hiddenv;
02260         }
02261         else
02262             last_layer = nnet_input;
02263 
02264         verify_gradient_affine_transform(
02265             input,output_comp, targetv, costsv, sampleweight,
02266             last_layer,wout,bout,
02267             est_gradient_wout, est_gradient_bout, false,
02268             possible_targets_vary,verify_step,target_values);
02269 
02270         cout << "Verify wout" << endl;
02271         output_gradient_verification(gradient_wout.toVec(), est_gradient_wout.toVec());
02272         cout << "Verify bout" << endl;
02273         output_gradient_verification(gradient_bout, est_gradient_bout);
02274  
02275         if(direct_in_to_out && nhidden>0)
02276         {
02277             verify_gradient_affine_transform(
02278                 input,output_comp, targetv, costsv, sampleweight,
02279                 nnet_input,direct_wout,direct_bout,
02280                 est_gradient_direct_wout, est_gradient_direct_bout,false,
02281                 possible_targets_vary, verify_step, target_values);
02282             cout << "Verify direct_wout" << endl;
02283             output_gradient_verification(gradient_direct_wout.toVec(), est_gradient_direct_wout.toVec());
02284             //cout << "Verify direct_bout" << endl;
02285             //output_gradient_verification(gradient_direct_bout, est_gradient_direct_bout);
02286         }
02287     }
02288     else
02289     {        
02290         if(nhidden>0)
02291         {
02292             verify_gradient_affine_transform(
02293                 input,output_comp, targetv, costsv, sampleweight,
02294                 feat_input,w1,b1,
02295                 est_gradient_w1, est_gradient_b1,
02296                 true,false, verify_step);
02297 
02298             cout << "Verify w1" << endl;
02299             output_gradient_verification(gradient_w1.toVec(), est_gradient_w1.toVec());
02300             cout << "Verify b1" << endl;
02301             output_gradient_verification(gradient_b1, est_gradient_b1);
02302 
02303             if(nhidden2>0)
02304             {
02305                 verify_gradient_affine_transform(
02306                     input,output_comp, targetv, costsv, sampleweight,
02307                     hiddenv,w2,b2,
02308                     est_gradient_w2, est_gradient_b2,true,false,
02309                     verify_step);
02310 
02311                 cout << "Verify w2" << endl;
02312                 output_gradient_verification(gradient_w2.toVec(), est_gradient_w2.toVec());
02313                 cout << "Verify b2" << endl;
02314                 output_gradient_verification(gradient_b2, est_gradient_b2);
02315                 
02316                 last_layer = hidden2v;
02317             }
02318             else
02319                 last_layer = hiddenv;
02320         }
02321         else
02322             last_layer = feat_input;
02323         
02324         verify_gradient_affine_transform(
02325             input,output_comp, targetv, costsv, sampleweight,
02326             last_layer,wout,bout,
02327             est_gradient_wout, est_gradient_bout, nhidden<=0,
02328             possible_targets_vary,verify_step, target_values);
02329 
02330         cout << "Verify wout" << endl;
02331         output_gradient_verification(gradient_wout.toVec(), est_gradient_wout.toVec());
02332         cout << "Verify bout" << endl;
02333         output_gradient_verification(gradient_bout, est_gradient_bout);
02334         
02335         if(direct_in_to_out && nhidden>0)
02336         {
02337             verify_gradient_affine_transform(
02338                 input,output_comp, targetv, costsv, sampleweight,
02339                 feat_input,direct_wout,direct_bout,
02340                 est_gradient_direct_wout, est_gradient_direct_bout,true, // fixed: fill the direct-connection estimates compared below
02341                 possible_targets_vary, verify_step,target_values);
02342             cout << "Verify direct_wout" << endl;
02343             output_gradient_verification(gradient_direct_wout.toVec(), est_gradient_direct_wout.toVec());
02344             cout << "Verify direct_bout" << endl;
02345             output_gradient_verification(gradient_direct_bout, est_gradient_direct_bout);
02346         }
02347     }
02348 
02349 }
02350 
02351 void FeatureSetSequentialCRF::verify_gradient_affine_transform(
02352     Vec global_input, Vec& global_output, Vec& global_targetv,
02353     Vec& global_costs, real sampleweight,
02354     Vec input, Mat weights, Vec bias,
02355     Mat est_gweights, Vec est_gbias,  
02356     bool input_is_sparse, bool output_is_sparse,
02357     real step,
02358     Vec output_indices) const
02359 {
02360     real *pval1, *pval2, *pval3;
02361     int ni,nj;
02362     real out1,out2;
02363     // Bias
02364     if(bias.length() != 0)
02365     {
02366         if(output_is_sparse)
02367         {
02368             pval1 = est_gbias.data();
02369             pval2 = bias.data();
02370             pval3 = output_indices.data();
02371             ni = output_indices.length();
02372             for(int i=0; i<ni; i++)
02373             {
02374                 pval2[(int)*pval3] += step;
02375                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02376                 out1 = global_costs[0];
02377                 pval2[(int)*pval3] -= 2*step;
02378                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02379                 out2 = global_costs[0];
02380                 pval1[(int)*pval3] = (out1-out2)/(2*step);
02381                 pval2[(int)*pval3] += step;
02382                 pval3++;
02383             }
02384         }
02385         else
02386         {
02387             pval1 = est_gbias.data();
02388             pval2 = bias.data();
02389             ni = bias.length();
02390             for(int i=0; i<ni; i++)
02391             {
02392                 *pval2 += step;
02393                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02394                 out1 = global_costs[0];
02395                 *pval2 -= 2*step;
02396                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02397                 out2 = global_costs[0];
02398                 *pval1 = (out1-out2)/(2*step);
02399                 *pval2 += step;
02400                 pval1++; 
02401                 pval2++;
02402             }
02403         }
02404     }
02405 
02406     // Weights
02407     if(!input_is_sparse && !output_is_sparse)
02408     {
02409         ni = weights.length();
02410         nj = weights.width();
02411         for(int i=0; i<ni; i++)
02412             for(int j=0; j<nj; j++)
02413             {
02414                 weights(i,j) += step;
02415                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02416                 out1 = global_costs[0];
02417                 weights(i,j) -= 2*step;
02418                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02419                 out2 = global_costs[0];
02420                 weights(i,j) += step;
02421                 est_gweights(i,j) = (out1-out2)/(2*step);
02422             }
02423     }
02424     else if(!input_is_sparse && output_is_sparse)
02425     {
02426         ni = output_indices.length();
02427         nj = input.length();
02428         pval3 = output_indices.data();
02429         for(int i=0; i<ni; i++)
02430         {
02431             for(int j=0; j<nj; j++)
02432             {
02433                 weights(j,(int)*pval3) += step;
02434                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02435                 out1 = global_costs[0];
02436                 weights(j,(int)*pval3) -= 2*step;
02437                 fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02438                 out2 = global_costs[0];
02439                 weights(j,(int)*pval3) += step;
02440                 est_gweights(j,(int)*pval3) = (out1-out2)/(2*step);
02441 //                if(target_values.length() != 1 && input[j] != 0 && (out1-out2)/(2*step) == 0)
02442 //                {                    
02443 //                    print_what_the_fuck();
02444 //                    weights(j,(int)*pval3) += 1;
02445 //                    fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02446 //                    weights(j,(int)*pval3) -= 1;
02447 //                    cout << "out1 - global_costs[0] =" << out1-global_costs[0] << endl;
02448 //                }
02449             }
02450             pval3++;
02451         }
02452     }
02453     else if(input_is_sparse && !output_is_sparse)
02454     {
02455         ni = input.length();
02456         nj = weights.width();
02457         if(ni != 0 )
02458         {
02459             pval3 = input.data();
02460             for(int i=0; i<ni; i++)
02461             {
02462                 pval1 = est_gweights[(int)(*pval3)];
02463                 pval2 = weights[(int)(*pval3++)];
02464                 for(int j=0; j<nj;j++)
02465                 {
02466                     *pval2 += step;
02467                     fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02468                     out1 = global_costs[0];
02469                     *pval2 -= 2*step;
02470                     fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02471                     out2 = global_costs[0];
02472                     *pval1 = (out1-out2)/(2*step);
02473                     *pval2 += step;
02474                     pval1++;
02475                     pval2++;
02476                 }
02477             }
02478         }
02479     }
02480     else if(input_is_sparse && output_is_sparse)
02481     {
02482         // Weights
02483         ni = input.length();
02484         nj = output_indices.length();
02485         if(ni != 0)
02486         {
02487             pval2 = input.data();
02488             for(int i=0; i<ni; i++)
02489             {
02490                 pval3 = output_indices.data();
02491                 for(int j=0; j<nj; j++)
02492                 {
02493                     weights((int)(*pval2),(int)*pval3) += step;
02494                     fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02495                     out1 = global_costs[0];
02496                     weights((int)(*pval2),(int)*pval3) -= 2*step;
02497                     fprop(global_input, global_output, global_targetv, global_costs, sampleweight);
02498                     out2 = global_costs[0];
02499                     est_gweights((int)(*pval2),(int)*pval3)  = (out1-out2)/(2*step);
02500                     weights((int)(*pval2),(int)*pval3) += step;
02501                     pval3++;
02502                 }
02503                 pval2++;
02504             }
02505         }
02506     }
02507 }
02508 
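// --- Editor's sketch (not part of the class) -------------------------------
// The estimates filled in above are central finite differences,
//     g_hat = (C(w + h) - C(w - h)) / (2h),
// whose truncation error is O(h^2), versus O(h) for the one-sided form
// (C(w + h) - C(w)) / h. The same idea on a scalar function, kept commented
// out so it stays out of the build:
//
//     #include <functional>
//
//     double central_diff_sketch(const std::function<double(double)>& C,
//                                double w, double h)
//     {
//         return (C(w + h) - C(w - h)) / (2*h);   // symmetric two-point estimate
//     }
// ----------------------------------------------------------------------------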
02509 
02510 void FeatureSetSequentialCRF::output_gradient_verification(Vec grad, Vec est_grad)
02511 {
02512     // Inspired from Func::verifyGradient()
02513 
02514     Vec num = apply(grad - est_grad,(tRealFunc)FABS);
02515     Vec denom = real(0.5)*apply(grad + est_grad,(tRealFunc)FABS);
02516     for (int i = 0; i < num.length(); i++)
02517     {
02518         if (!fast_exact_is_equal(num[i], 0))
02519             num[i] /= denom[i];
02520         else
02521             if(!fast_exact_is_equal(denom[i],0))
02522                 cout << "at position " << i << " num[i] == 0 but denom[i] = " << denom[i] << endl;
02523     }
02524     int pos = argmax(num);
02525     cout << max(num) << " (at position " << pos << "/" << num.length()
02526          << ", computed = " << grad[pos] << " and estimated = "
02527          << est_grad[pos] << ")" << endl;
02528 
02529     real norm_grad = norm(grad);
02530     real norm_est_grad = norm(est_grad);
02531     real cos_angle = fast_exact_is_equal(norm_grad*norm_est_grad,
02532                                          0)
02533         ? MISSING_VALUE
02534         : dot(grad,est_grad) /
02535         (norm_grad*norm_est_grad);
02536     if (cos_angle > 1)
02537         cos_angle = 1;      // Numerical imprecisions can lead to such situation.
02538     cout << "grad.length() = " << grad.length() << endl;
02539     cout << "cos(angle) : " << cos_angle << endl;
02540     cout << "angle : " << ( is_missing(cos_angle) ? MISSING_VALUE
02541                             : acos(cos_angle) ) << endl;
02542 }
02543 
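// --- Editor's note -----------------------------------------------------------
// The diagnostics printed above are, elementwise, the relative error
//     |g_i - g_hat_i| / (0.5 * |g_i + g_hat_i|)
// (its maximum and position), and the angle between the two gradient vectors,
//     acos( dot(g, g_hat) / (||g|| * ||g_hat||) ).
// Both should be close to 0 when the analytic and estimated gradients agree.
// ------------------------------------------------------------------------------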
02544 void FeatureSetSequentialCRF::batchComputeOutputAndConfidence(VMat inputs, real probability,
02545                                          VMat outputs_and_confidence) const
02546 {
02547     val_string_reference_set = inputs;
02548     inherited::batchComputeOutputAndConfidence(inputs,probability,outputs_and_confidence);
02549     val_string_reference_set = train_set;
02550 }
02551 
02552 void FeatureSetSequentialCRF::use(VMat testset, VMat outputs) const
02553 {
02554     val_string_reference_set = testset;
02555     if(testset->width() > train_set->inputsize())
02556         target_values_reference_set = testset;
02558     inherited::use(testset,outputs);
02559     val_string_reference_set = train_set;
02560     if(testset->width() > train_set->inputsize())
02561         target_values_reference_set = train_set;
02562 }
02563 
02564 void FeatureSetSequentialCRF::test(VMat testset, PP<VecStatsCollector> test_stats, 
02565                       VMat testoutputs, VMat testcosts) const
02566 {
02567     val_string_reference_set = testset;
02568     target_values_reference_set = testset;
02569     inherited::test(testset,test_stats,testoutputs,testcosts);
02570     val_string_reference_set = train_set;
02571     target_values_reference_set = train_set;
02572 }
02573 
02574 VMat FeatureSetSequentialCRF::processDataSet(VMat dataset) const
02575 {
02576     VMat ret;
02577     val_string_reference_set = dataset;
02578     // Assumes it contains the target part information
02579     if(dataset->width() > train_set->inputsize())
02580         target_values_reference_set = dataset;
02581     ret = inherited::processDataSet(dataset);
02582     val_string_reference_set = train_set;
02583     if(dataset->width() > train_set->inputsize())
02584         target_values_reference_set = train_set;
02585     return ret;
02586 }
02587 
02588 } // end of namespace PLearn
02589 
02590 
02591 /*
02592   Local Variables:
02593   mode:c++
02594   c-basic-offset:4
02595   c-file-style:"stroustrup"
02596   c-file-offsets:((innamespace . 0)(inline-open . 0))
02597   indent-tabs-mode:nil
02598   fill-column:79
02599   End:
02600 */
02601 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :