PLearn 0.1
StructuralLearner.cc
00001 // -*- C++ -*-
00002 
00003 // StructuralLearner.cc
00004 //
00005 // Copyright (C) 2006 Pierre-Antoine Manzagol 
00006 // 
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 // 
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 // 
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 // 
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 // 
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 // 
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 /* *******************************************************      
00036    * $Id: .pyskeleton_header 544 2003-09-01 00:05:31Z plearner $ 
00037    ******************************************************* */
00038 
00039 // Authors: Pierre-Antoine Manzagol
00040 
00044 #include "StructuralLearner.h"
00045 #include <plearn/math/plapack.h>
00046 #include <plearn/math/random.h>
00047 //#include <plearn/sys/Profiler.h>
00048 #include <map>
00049 #include <vector>
00050 #include <algorithm>
00051 
00052 // PA - used for debugging
00053 #define USE_PA_DEBUG
00054 
00055 #ifdef USE_PA_DEBUG
00056 #define PA_DEBUG(x) x
00057 #else
00058 #define PA_DEBUG(x)
00059 #endif
00060 
00061 
00062 
00063 // *** Used to determine most frequent words in auxiliary set ***
00064 class freqCount {
00065 
00066 public:
00067   freqCount(int wt, unsigned long int c) : wordtag(wt), count(c){};
00068   int wordtag;
00069   unsigned long int count;
00070 };
00071 
00072 bool freqCountGT(const freqCount &a, const freqCount &b) 
00073 {
00074     return a.count > b.count;
00075 }
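// A minimal usage sketch (hypothetical container/variable names): the helper above is
// presumably what initWordProblemsStructures() relies on to keep only the
// n_auxiliary_wordproblems most frequent words of the auxiliary set, e.g.
//
//   std::vector<freqCount> counts;                        // one entry per distinct wordtag
//   counts.push_back(freqCount(wordtag, occurrences));    // ... filled while scanning the set
//   std::sort(counts.begin(), counts.end(), freqCountGT); // sort by decreasing count
//   counts.resize(n_auxiliary_wordproblems);              // keep the most frequent words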
00076 
00077 
00078 namespace PLearn {
00079 using namespace std;
00080 
00081 PLEARN_IMPLEMENT_OBJECT(
00082     StructuralLearner,
00083     "ONE LINE DESCRIPTION",
00084     "MULTI-LINE \nHELP");
00085 
00086 StructuralLearner::StructuralLearner() 
00087 /* ### Initialize all fields to their default value here */
00088 {
00089 
00090   std::cerr << "StructuralLearner::StructuralLearner()" << std::endl;
00091 
00092   // With these values, will not learn
00093   start_learning_rate=0.01;
00094   decrease_constant=0.0;
00095   lambda=1e-5;
00096   index_O = 0;
00097   nhidden = 0;
00098   separate_features = 1;
00099   n_auxiliary_wordproblems = 100;
00100   epsilon = 1e-4;
00101   max_stage = INT_MAX;
00102   use_thetas_for_output_weights=1;
00103   use_thetas_for_hidden_weights=0;
00104 
00105   //m_tvec_auxiliaryLearners.resize(0);
00106   
00107   // ### You may (or not) want to call build_() to finish building the object
00108   // ### (doing so assumes the parent classes' build_() have been called too
00109   // ### in the parent classes' constructors, something that you must ensure)
00110 }
00111 
00112 void StructuralLearner::declareOptions(OptionList& ol)
00113 {
00114     declareOption(ol, "ws", &StructuralLearner::ws, OptionBase::learntoption,
00115                    "Weights of the linear classifier: f(x) = wt x + vt theta x");
00116     declareOption(ol, "vs", &StructuralLearner::vs, OptionBase::learntoption,
00117                    "Weights of the linear classifier: f(x) = wt x + vt theta x");
00118     declareOption(ol, "whids", &StructuralLearner::whids, OptionBase::learntoption,
00119                    "Weights from input to hidden layers (one for each feature group)");
00120     declareOption(ol, "vhids", &StructuralLearner::whids, OptionBase::learntoption,
00121                    "Weights for the thetahids projections, for the layers (one for each feature group)");
00122     declareOption(ol, "thetas", &StructuralLearner::thetas, OptionBase::learntoption,
00123                    "structure parameter of the linear classifier: f(x) = wt x + vt theta x");
00124     declareOption(ol, "thetahids", &StructuralLearner::thetahids, OptionBase::learntoption,
00125                    "structure parameter of the linear classifier: f(x) = wt x + vt theta x");
00126     declareOption(ol, "start_learning_rate", &StructuralLearner::start_learning_rate, OptionBase::buildoption,
00127                    "Starting learning rate of the stochastic gradient descent");
00128     declareOption(ol, "decrease_constant", &StructuralLearner::decrease_constant, OptionBase::buildoption,
00129                    "Decrease constant of the stochastic learning rate");
00130     declareOption(ol, "best_error", &StructuralLearner::best_error, OptionBase::learntoption,
00131                    "Best training error, when training model before SVD");
00132     declareOption(ol, "current_error", &StructuralLearner::current_error, OptionBase::learntoption,
00133                    "Current training error, when training model before SVD");
00134     declareOption(ol, "auxiliary_task_train_set", &StructuralLearner::auxiliary_task_train_set, OptionBase::buildoption,
00135                    "Training set for auxiliary task");
00136     declareOption(ol, "epsilon", &StructuralLearner::epsilon, OptionBase::buildoption,
00137                    "Threshold to determine convergence of stochastic descent");
00138     declareOption(ol, "lambda", &StructuralLearner::lambda, OptionBase::buildoption,
00139                    "Weight decay for output weights");
00140     declareOption(ol, "nhidden", &StructuralLearner::nhidden, OptionBase::buildoption,
00141                    "Number of hidden neurons in the hidden layers");
00142     declareOption(ol, "use_thetas_for_output_weights", &StructuralLearner::use_thetas_for_output_weights, OptionBase::buildoption,
00143                    "Indication that structural parameters for the output weights should be used for the neural network");
00144      declareOption(ol, "use_thetas_for_hidden_weights", &StructuralLearner::use_thetas_for_hidden_weights, OptionBase::buildoption,
00145                    "Indication that structural parameters for the hidden weights should be used for the neural network");
00146     declareOption(ol, "max_stage", &StructuralLearner::max_stage, OptionBase::buildoption,
00147                    "Maximum number of stages when training the model to find the thetas");
00148     declareOption(ol, "index_O", &StructuralLearner::index_O, OptionBase::buildoption,
00149                    "Index of the \"O\" (abstention) symbol");
00150     declareOption(ol, "separate_features", &StructuralLearner::separate_features, OptionBase::buildoption,
00151                    "Indication that the features should be separated into groups");
00152     declareOption(ol, "abstention_threshold", &StructuralLearner::abstention_threshold, OptionBase::buildoption,
00153                    "Threshold on the probability of the index_O symbol below which the predictor should not abstain");
00154     declareOption(ol, "n_auxiliary_wordproblems", &StructuralLearner::n_auxiliary_wordproblems, OptionBase::buildoption,
00155                    "Number of most frequent words that are to be predicted.");
00156 
00157     // Now call the parent class' declareOptions
00158     inherited::declareOptions(ol);
00159 }
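// A minimal usage sketch (hypothetical; assumes the standard PLearner/Object interface --
// in practice the build options above would normally be set from a plearn script):
//
//   PP<StructuralLearner> learner = new StructuralLearner();
//   learner->setOption("nhidden", "50");
//   learner->setOption("start_learning_rate", "0.01");
//   learner->nstages = 5;
//   learner->setTrainingSet(target_vmat);                     // target-task VMat
//   learner->setTrainStatsCollector(new VecStatsCollector()); // collects training costs
//   learner->train();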
00160 
00161 void StructuralLearner::buildTasksParameters(int nout, TVec<unsigned int> feat_lengths)
00162 {
00163 
00164     before_softmax.resize(nout);
00165     output.resize(nout);
00166     good_class_softmax_gradient.resize(nout);
00167     bad_class_softmax_gradient.resize(nout);    
00168    
00169     if(!separate_features)
00170     {
00171         if(nhidden <= 0 || use_thetas_for_output_weights)
00172             vs.resize( 1 );
00173         else
00174             vs.resize( 0 );
00175 
00176         if(nhidden > 0) 
00177         {
00178             ws.resize(1);
00179             whids.resize( feat_lengths.length() );
00180             if(use_thetas_for_hidden_weights)
00181                 vhids.resize( 1 );
00182             else
00183                 vhids.resize( 0 );
00184             vs_times_thetas.resize(1);
00185         }
00186         else
00187             ws.resize(feat_lengths.length());
00188     }
00189     else
00190     {
00191         ws.resize( feat_lengths.length() );
00192 
00193         if(nhidden <= 0 || use_thetas_for_output_weights)
00194             vs.resize( feat_lengths.length()-3);
00195         else
00196             vs.resize( 0 );
00197 
00198         if(nhidden > 0)
00199         {
00200             whids.resize( feat_lengths.length() );
00201             if(use_thetas_for_hidden_weights)
00202                 vhids.resize( feat_lengths.length()-3);
00203             else
00204                 vhids.resize( 0 );
00205             vs_times_thetas.resize(feat_lengths.length()-3);
00206         }
00207     }
00208 
00209     for(int i=0; i<ws.length(); i++)  {
00210         if(nhidden>0)
00211             ws[i].resize( nout, nhidden +1);  // +1 for the bias
00212         else
00213             ws[i].resize( nout, feat_lengths[i] ); // bias is included in features...
00214     }
00215 
00216     for(int i=0; i<vs.length(); i++)  {
00217         vs[i].resize( nout, 50 );
00218     }
00219 
00220     for(int i=0; i<whids.length(); i++)  {
00221         whids[i].resize(nhidden,feat_lengths[i]);
00222     }
00223 
00224     for(int i=0; i<vhids.length(); i++)  {
00225         vhids[i].resize(nhidden,50);
00226     }
00227     
00228 
00229     for(int i=0; i<vs_times_thetas.length(); i++)
00230         vs_times_thetas[i].resize(nout,nhidden);
00231 
00232     if(nhidden > 0)
00233     {
00234         if(separate_features)
00235         {
00236             activations.resize(nhidden+1,feat_lengths.length()); // +1 for the bias
00237             activations_gradient.resize(nhidden+1,feat_lengths.length()); // +1 for the bias
00238         }
00239         else
00240         {
00241             activations.resize(nhidden+1,1); // idem
00242             activations_gradient.resize(nhidden+1,1); // idem
00243         }
00244     }
00245 }
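// Shapes resulting from the resizes above: ws[i] is (nout x nhidden+1) when a hidden
// layer is used and (nout x feat_lengths[i]) otherwise; vs[i] is (nout x 50) and acts on
// the 50-dimensional projections thetas_times_x; whids[i] is (nhidden x feat_lengths[i])
// and vhids[i] is (nhidden x 50). activations keeps one column per feature group (plus a
// bias row) when separate_features is set, and a single column otherwise.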
00246 
00247 void StructuralLearner::buildThetaParameters(TVec<unsigned int> feat_lengths)
00248 {
00249     if(separate_features)
00250     {
00251         if(nhidden <= 0 || use_thetas_for_output_weights)
00252         {
00253             thetas.resize( feat_lengths.length()-3 );  // Do not consider the features for previous tags, nor the features for the presence of digits and capital letters (there are too few of them)!
00254             thetas_times_x.resize( 50, feat_lengths.length()-3 );
00255             for(int i=0; i<thetas.length(); i++)  {
00256                 if(nhidden>0)
00257                     thetas[i].resize( 50, nhidden +1); // +1 for the bias                
00258                 else
00259                     thetas[i].resize( 50, feat_lengths[i] );
00260             }
00261         }
00262         else
00263         {
00264             thetas.resize(0);
00265             thetas_times_x.resize(0,0);
00266         }
00267 
00268         if(nhidden > 0 && use_thetas_for_hidden_weights)
00269         {
00270             thetahids.resize( feat_lengths.length()-3 );  // Do not consider the features for previous tags, nor the features for the presence of digits and capital letters (there are too few of them)!
00271             thetahids_times_x.resize( 50, feat_lengths.length()-3 );
00272             for(int i=0; i<thetahids.length(); i++)  {
00273                 thetahids[i].resize( 50, feat_lengths[i] );
00274             }
00275         }
00276         else
00277         {
00278             thetahids.resize(0);
00279             thetahids_times_x.resize(0,0);
00280         }
00281 
00282     }
00283     else
00284     {
00285         int nfeat = sum(feat_lengths);
00286         if(nhidden <= 0 || use_thetas_for_output_weights)
00287         {
00288             thetas.resize( 1 );  // Do not consider the features for previous tags, nor the features for the presence of digits and capital letters (there are too few of them)!
00289             thetas_times_x.resize( 50, 1 );
00290             if(nhidden>0)
00291                 thetas[0].resize( 50, nhidden +1); // +1 for the bias                
00292             else
00293                 thetas[0].resize( 50, nfeat );            
00294         }
00295         else
00296         {
00297             thetas.resize(0);
00298             thetas_times_x.resize(0,0);
00299         }
00300 
00301         if(nhidden > 0 && use_thetas_for_hidden_weights)
00302         {
00303             thetahids.resize( 1 );  // Do not consider the features for previous tags, nor the features for the presence of digits and capital letters (there are too few of them)!
00304             thetahids_times_x.resize( 50, 1 );            
00305             thetahids[0].resize( 50, nfeat );            
00306         }
00307         else
00308         {
00309             thetahids.resize(0);
00310             thetahids_times_x.resize(0,0);
00311         }
00312 
00313     }    
00314 }
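// Note: the dimension of the structural projection is hard-coded to 50 throughout this
// file. Each thetas[i] (resp. thetahids[i]) maps a feature group, or the hidden layer,
// to that 50-dimensional shared subspace; thetas_times_x / thetahids_times_x cache the
// projections for the current example.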
00315 
00316 // For now everything is done in train(). That is certainly not good, for example if we want to
00317 // reload ...
00318 void StructuralLearner::build_()
00319 {
00320     std::cerr << "StructuralLearner::build_()" << std::endl;
00321 
00322   if(train_set)
00323   {
00324     // ***** Sanity checks
00325     if(weightsize_ < 0)
00326       PLWARNING("In StructuralLearner::build_(): negative weightsize_");
00327     if(weightsize_ > 0)
00328       PLWARNING("In StructuralLearner::build_(): does not support weighting of the training set");
00329     if(targetsize_ < 0)
00330       PLWARNING("In StructuralLearner::build_(): negative targetsize_");
00331     if(targetsize_ > 1)
00332       PLWARNING("In StructuralLearner::build_(): multi-target learning is not supported, only one (hardcoded) target will be considered");
00333 
00334     // ***** Resize vectors
00335     input.resize(inputsize());
00336     target.resize(targetsize());
00337     costs.resize(getTrainCostNames().length());        
00338     
00339     initPreviousLabelCurrentWordBigramMapping();
00340     
00341     // dummy call to computeFeatures in order to set fls
00342     computeFeatures(input, target, 0, 0, feats);
00343 
00344     // Make sure that all feats have non null storage
00345     for(int i=0; i<feats.length(); i++)
00346         feats[i].resize(1);
00347 
00348     if(auxiliary_task_train_set && stage == 0)
00349         buildTasksParameters(2*n_auxiliary_wordproblems,fls);
00350     else
00351         buildTasksParameters(outputsize(),fls);
00352     
00353     if(auxiliary_task_train_set)
00354         buildThetaParameters(fls);
00355     
00356     if(stage==0 || stage ==1)
00357         initializeParams();
00358     
00359     if( auxiliary_task_train_set && stage==0 && auxiliary_indices_left.size()==0) {
00360         initWordProblemsStructures();
00361     }
00362   }// if we have a train_set
00363 }
00364 
00365 // ### Nothing to add here, simply calls build_
00366 void StructuralLearner::build()
00367 {
00368     inherited::build();
00369     build_();
00370 }
00371 
00372 
00373 void StructuralLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00374 {
00375     inherited::makeDeepCopyFromShallowCopy(copies);
00376 
00377     deepCopyField(thetas, copies);
00378     deepCopyField(thetas_times_x, copies);
00379     deepCopyField(thetahids, copies);
00380     deepCopyField(thetahids_times_x, copies);
00381     deepCopyField(auxiliary_task_train_set,copies);
00382     deepCopyField(ws,copies);
00383     deepCopyField(vs,copies);
00384     deepCopyField(whids,copies);
00385     deepCopyField(vhids,copies);
00386     deepCopyField(feats, copies);
00387     deepCopyField(input, copies);
00388     deepCopyField(target, copies);
00389     deepCopyField(activations, copies);
00390     deepCopyField(before_softmax, copies);
00391     deepCopyField(output, copies);
00392     deepCopyField(costs, copies);
00393     deepCopyField(auxiliary_indices_current, copies);
00394     deepCopyField(auxiliary_indices_left, copies);
00395     deepCopyField(viterbi_table, copies);
00396 
00397     deepCopyField(currentFeatureGroup, copies);
00398     deepCopyField(fls, copies);
00399 
00400     // ### Remove this line when you have fully implemented this method.
00401     //PLERROR("StructuralLearner::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
00402 }
00403 
00404 
00405 int StructuralLearner::outputsize() const
00406 {
00407     return(train_set->getDictionary(inputsize_)->size() + (train_set->getDictionary(inputsize_)->oov_not_in_possible_values ? 0 : 1));
00408 }
00409 
00410 void StructuralLearner::forget()
00411 {
00414 
00420     if(ws.size()!=0) // This means that build was called...
00421         initializeParams();
00422     
00423     stage = 0;   
00424 }
00425     
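// Training proceeds in two phases (see also build_()). While stage == 0 and an
// auxiliary_task_train_set is given, a classifier is first trained on the auxiliary
// word-prediction problems and the thetas are extracted from its weights by SVD; the
// parameters are then reinitialized and the remaining stages train the target
// classifier, optionally using the thetas as additional projected features.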
00426 void StructuralLearner::train()
00427 {
00428   if (!initTrain())
00429       return;
00430 
00431   //Profiler p;
00432   //p.activate();
00433 
00434   int nout = outputsize();
00435   real lambda_times_2 = lambda*2;
00436   real log_softmax_gradient = 0;
00437   real v_times_theta = 0;
00438   // Compute thetas over auxiliary task,
00439   // if an auxiliary problem is given
00440   //p.start("All train");
00441   if( auxiliary_task_train_set && stage == 0)  {
00442       
00443       // Preprocessing of auxiliary task should be done by now!
00444 
00445       // Train initial weights ws
00446       std::cerr << "StructuralLearner::train() - Training learner for SVD" << std::endl;    
00447 
00448       nout = 2*n_auxiliary_wordproblems;
00449       best_error=REAL_MAX;
00450       current_error=REAL_MAX/2;
00451       int it = 0;
00452       int n_auxiliary_samples = auxiliary_indices_current.length()+auxiliary_indices_left.length();
00453       int begin_class = 0;
00454       int end_class = n_auxiliary_wordproblems;
00455 
00456       while(current_error < best_error - epsilon && it < max_stage)  {
00457           best_error = current_error;
00458           train_stats->forget();          
00459           for(int t=0; t<n_auxiliary_samples; t++)  {
00460               learning_rate = start_learning_rate / (1+decrease_constant*(it*n_auxiliary_samples+t));          
00461               if(t<auxiliary_indices_current.length())
00462               {
00463                   begin_class = 0;
00464                   end_class = n_auxiliary_wordproblems;
00465                   auxiliary_task_train_set->getExample(auxiliary_indices_current(t,0), input, target, weight);
00466                   target.resize(5);
00467                   target.fill(MISSING_VALUE);
00468                   target[2] = auxiliary_indices_current(t,1);
00469                   computeFeatures(input,target,1,t,feats,27);
00470                   //p.start("Auxiliary computeOutputWithFeatures");
00471                   computeOutputWithFeatures(feats,output,false,begin_class,end_class); 
00472                   //p.end("Auxiliary computeOutputWithFeatures");
00473               }
00474               else
00475               {
00476                   begin_class = n_auxiliary_wordproblems;
00477                   end_class = 2*n_auxiliary_wordproblems;
00478                   auxiliary_task_train_set->getExample(auxiliary_indices_left(t-auxiliary_indices_current.length(),0), input, target, weight);
00479                   target.resize(5);
00480                   target.fill(MISSING_VALUE);
00481                   target[2] = n_auxiliary_wordproblems+auxiliary_indices_left(t-auxiliary_indices_current.length(),1);
00482 
00483                   computeFeatures(input,target,1,t,feats,23);
00484                   //p.start("Auxiliary computeOutputWithFeatures");
00485                   computeOutputWithFeatures(feats,output,false,begin_class,end_class); 
00486                   //p.end("Auxiliary computeOutputWithFeatures");
00487               }
00488               
00489               computeCostsFromOutputs(input, output, target, costs);
00490               train_stats->update(costs);
00491 
00492               //p.start("Auxiliary update");
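              // Per-class gradient coefficients: for a softmax output with negative
              // log-likelihood loss, the gradient w.r.t. the pre-softmax activation of
              // class i is (output[i] - 1) for the target class and output[i] otherwise.
              // The learning rate is folded in here, so the weight updates below reduce
              // to "-= coefficient * input" (plus weight decay).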
00493               for(int i=begin_class; i<end_class; i++)
00494               {
00495                   good_class_softmax_gradient[i] = learning_rate*(output[i]-1);
00496                   bad_class_softmax_gradient[i] = learning_rate*output[i];                  
00497               }
00498 
00499               // Update weights
00500               if(nhidden>0)
00501               {
00502                   for(int i=0; i<activations.length(); i++)
00503                       for(int j=0; j<activations.width(); j++)
00504                       {
00505                           activations_gradient(i,j) = 1-activations(i,j)*activations(i,j);
00506                       }
00507 
00508                   // Output weights update
00509                   for(int f=0; f<ws.length(); f++)
00510                   {
00511                       for(int i=begin_class; i<end_class; i++)
00512                       {
00513                           // Update w
00514                           for(int j=0; j<nhidden+1; j++)  {
00515                               if(i!=target[2])  {
00516                                   ws[f](i, j) -= bad_class_softmax_gradient[i]*activations(j,f) + (lambda != 0 ? lambda_times_2*ws[f](i,j) : 0);
00517                               }
00518                               else  {
00519                                   ws[f](i, j) -= good_class_softmax_gradient[i]*activations(j,f) + (lambda != 0 ? lambda_times_2*ws[f](i, j) : 0);
00520                               }
00521                           }
00522                       }
00523                   }
00524 
00525                   // Hidden weights update
00526                   
00527                   for(int f=0; f<ws.length(); f++)
00528                   {
00529                       
00530                       for(int j=0; j<nhidden; j++)  {
00531                           log_softmax_gradient = 0;
00532                           for(int i=begin_class; i<end_class; i++)
00533                           {
00534                               if(i!=target[2])  {
00535                                   log_softmax_gradient += bad_class_softmax_gradient[i]*ws[f](i,j);
00536                               }
00537                               else {
00538                                   log_softmax_gradient += good_class_softmax_gradient[i]*ws[f](i,j);
00539                               }
00540                           }
00541 
00542                           log_softmax_gradient *= activations_gradient(j,f);
00543                           
00544                           if(!separate_features)
00545                               for(int f2=0; f2<whids.length(); f2++)
00546                               {
00547                                   current_features = feats[f2].data();
00548                                   for(int k=0; k<feats[f2].length(); k++)
00549                                   { 
00550                                       whids[f2](j, current_features[k]) -= log_softmax_gradient + (lambda != 0 ? lambda_times_2*whids[f2](j,current_features[k]) : 0);                                  
00551                                   }                                  
00552                               }
00553                           else
00554                           {
00555                               current_features = feats[f].data();
00556                               for(int k=0; k<feats[f].length(); k++)
00557                               { 
00558                                   
00559                                   whids[f](j, current_features[k]) -= log_softmax_gradient + (lambda != 0 ? lambda_times_2*whids[f](j,current_features[k]) : 0);                          
00560                               }
00561                           }
00562                       }
00563                   }
00564                   
00565               }
00566               else
00567               {
00568                   for(int f=0; f<feats.length(); f++)
00569                   {
00570                       current_features = feats[f].data();
00571                       for(int i=begin_class; i<end_class; i++) 
00572                       {                                        
00573                           // Update w
00574                           for(int j=0; j<feats[f].length(); j++)  {
00575                               if(i!=target[2])  {
00576                                   ws[f](i, current_features[j]) -= bad_class_softmax_gradient[i] + (lambda != 0 ? lambda_times_2*ws[f](i, current_features[j]) : 0);
00577                               }
00578                               else  {
00579                                   ws[f](i, current_features[j]) -= good_class_softmax_gradient[i] + (lambda != 0 ? lambda_times_2*ws[f](i, current_features[j]) : 0);
00580                               }
00581                           }                                                             
00582                       }
00583                   }
00584               }
00585               //p.end("Auxiliary update");
00586           }
00587           it++;
00588           train_stats->finalize();
00589           current_error = train_stats->getMean()[0];
00590           cout << "Current error = " << current_error << endl;
00591       }
00592       
00593       // Now, using computed theta to bias training
00594       Mat V;
00595       Vec D;          
00596       for(int f=0; f<thetas.length(); f++)
00597       {
00598           // Perform SVD
00599           std::cerr << "StructuralLearner::train() - Performing " << f << "th SVD" << std::endl;
00600           Mat U_t;
00601           if(separate_features)
00602               U_t= sqrt(lambda) * ws[f];
00603           else
00604           {
00605               Array<Mat> to_concat(ws.length());
00606               for(int m=0; m<to_concat.length(); m++)
00607                   to_concat[m] = ws[m];
00608               U_t = hconcat(to_concat);
00609           }
00610 
00611           std::cout << "U_t.length() " << U_t.length() << " U_t.width() " << U_t.width() << std::endl;
00612           
00613           // --- Perform the SVD
00614           lapackSVD(U_t, thetas[f], D, V, 'S');
00615           
00616           std::cout << "thetas[f].length() " << thetas[f].length() << " thetas[f].width() " << thetas[f].width() << std::endl;
00617           
00618           thetas[f] = thetas[f].subMatRows(0, 50);
00619       }
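      // The loop above follows the structural-learning recipe: the weight matrices
      // trained on the auxiliary problems are scaled by sqrt(lambda) in the
      // separate-features case, or hconcat'ed otherwise, before being decomposed;
      // keeping the first 50 rows of the returned factor yields the shared structure
      // thetas[f] used to project inputs for the target task. The loop below applies
      // the same treatment to the hidden weights (thetahids). The exact row/column
      // convention depends on lapackSVD; the code assumes the structure vectors come
      // out as rows.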
00620 
00621       for(int f=0; f<thetahids.length(); f++)
00622       {
00623           // Perform SVD
00624           std::cerr << "StructuralLearner::train() - Performing " << f << "th SVD" << std::endl;
00625           Mat U_t;
00626           if(separate_features)
00627               U_t= sqrt(lambda) * whids[f];
00628           else
00629           {
00630               Array<Mat> to_concat(whids.length());
00631               for(int m=0; m<to_concat.length(); m++)
00632                   to_concat[m] = whids[m];
00633               U_t = hconcat(to_concat);
00634           }
00635 
00636           std::cout << "U_t.length() " << U_t.length() << " U_t.width() " << U_t.width() << std::endl;
00637           
00638           // --- Perform the SVD
00639           lapackSVD(U_t, thetahids[f], D, V, 'S');
00640           
00641           std::cout << "thetahids[f].length() " << thetahids[f].length() << " thetahids[f].width() " << thetahids[f].width() << std::endl;
00642           
00643           thetahids[f] = thetahids[f].subMatRows(0, 50);
00644       }
00645 
00646       // Resize and initialize ws, vs, whids, etc.      
00647       nout = outputsize();
00648       // Free parameters space
00649       for(int p=0; p<vs.length(); p++)
00650           vs[p] = Mat();
00651       for(int p=0; p<ws.length(); p++)
00652           ws[p] = Mat();
00653       for(int p=0; p<vhids.length(); p++)
00654           vhids[p] = Mat();
00655       for(int p=0; p<whids.length(); p++)
00656           whids[p] = Mat();
00657       buildTasksParameters(nout,fls);
00658       initializeParams();
00659       stage++;
00660   }
00661 
00662   while(stage<nstages)
00663   {
00664       // Train target classifier
00665       std::cerr << "StructuralLearner::train() - Training target classifier" << std::endl;                
00666       std::cerr << "StructuralLearner::train() - stage is " << stage << std::endl;
00667       
00668       train_stats->forget();
00669       int n_samples = train_set->length();
00670       // TODO: is this a good clear?
00671       //token_prediction.clear();
00672       for(int t=0; t<train_set->length(); t++)  {
00673           learning_rate = start_learning_rate / (1+decrease_constant*(stage*n_samples+t));          
00674           train_set->getExample(t, input, target, weight);
00675           computeFeatures(input,target,0,t,feats);
00676           // 1) compute the output
00677           //p.start("Main computeOutputWithFeatures");
00678           computeOutputWithFeatures(feats,output,auxiliary_task_train_set) ; 
00679           //p.end("Main computeOutputWithFeatures");
00680           // 2) compute the cost      
00681           computeCostsFromOutputs(input, output, target, costs);
00682           train_stats->update(costs);
00683           // TODO: verify if OK
00684           //updateDynamicFeatures(token_prediction_train,input[3*2],target[2]);
00685           // 3) Update weights                           
00686           //p.start("Main update");
00687 
00688           
00689           for(int i=0; i<nout; i++)
00690           {
00691               good_class_softmax_gradient[i] = learning_rate*(output[i]-1);
00692               bad_class_softmax_gradient[i] = learning_rate*output[i];                  
00693           }
00694 
00695           // Update weights
00696           if(nhidden>0)
00697           {
00698               for(int i=0; i<activations.length(); i++)
00699                   for(int j=0; j<activations.width(); j++)
00700                   {
00701                       activations_gradient(i,j) = 1-activations(i,j)*activations(i,j);
00702                   }
00703 
00704               // Output weights update
00705               for(int f=0; f<ws.length(); f++)
00706               {
00707                   for(int i=0; i<nout; i++)
00708                   {
00709                       // Update w
00710                       for(int j=0; j<nhidden+1; j++)  {
00711                           if(i!=target[2])  {
00712                               ws[f](i, j) -= bad_class_softmax_gradient[i]*activations(j,f) + (lambda != 0 ? lambda_times_2*ws[f](i,j) : 0);
00713                           }
00714                           else  {
00715                               ws[f](i, j) -= good_class_softmax_gradient[i]*activations(j,f) + (lambda != 0 ? lambda_times_2*ws[f](i, j) : 0);
00716                           }
00717                       }
00718                       if(auxiliary_task_train_set && use_thetas_for_output_weights && ((!separate_features && f==0) || (separate_features && f<thetas.length())))
00719                       {
00720                           // Update v
00721                           for(int j=0; j<50; j++)  {
00722                               if(i!=target[2])  {
00723                                   vs[f](i, j) -= bad_class_softmax_gradient[i]*thetas_times_x(j,f);
00724                               }
00725                               else  {
00726                                   vs[f](i, j) -= good_class_softmax_gradient[i]*thetas_times_x(j,f);
00727                               }
00728                           }
00729                       }
00730                   }
00731               }
00732 
00733               // Hidden weights update
00734                   
00735               for(int f=0; f<ws.length(); f++)
00736               {
00737                       
00738                   for(int j=0; j<nhidden; j++)  {
00739                       log_softmax_gradient = 0;
00740                       for(int i=0; i<nout; i++)
00741                       {
00742                           if(i!=target[2])  {
00743                               log_softmax_gradient += bad_class_softmax_gradient[i]*ws[f](i,j);
00744                           }
00745                           else {
00746                               log_softmax_gradient += good_class_softmax_gradient[i]*ws[f](i,j);
00747                           }
00748                           if(auxiliary_task_train_set && use_thetas_for_output_weights && f<vs.length())
00749                           {
00750                               v_times_theta = 0;
00751                               for(int l=0; l<50; l++)
00752                               {
00753                                   v_times_theta += vs[f](i,l) * thetas[f](l,j);
00754                               }
00755                                   
00756                               if(i!=target[2])  {
00757                                   log_softmax_gradient += bad_class_softmax_gradient[i]*v_times_theta;
00758                               }
00759                               else {
00760                                   log_softmax_gradient += good_class_softmax_gradient[i]*v_times_theta;
00761                               }
00762                                   
00763                           }
00764                       }
00765 
00766                       log_softmax_gradient *= activations_gradient(j,f);
00767                           
00768                       if(!separate_features)
00769                       {
00770                           for(int f2=0; f2<whids.length(); f2++)
00771                           {
00772                               current_features = feats[f2].data();
00773                               for(int k=0; k<feats[f2].length(); k++)
00774                               { 
00775                                   whids[f2](j, current_features[k]) -= log_softmax_gradient + (lambda != 0 ? lambda_times_2*whids[f2](j,current_features[k]) : 0);
00776                               }
00777                           }
00778                           if(auxiliary_task_train_set && use_thetas_for_hidden_weights && f<vhids.length())
00779                           {
00780                               // Update v
00781                               for(int l=0; l<50; l++)  {
00782                                   vhids[f](j, l) -= log_softmax_gradient*thetahids_times_x(l,0);
00783                               }
00784                           }
00785 
00786                       }
00787                       else
00788                       {
00789                           current_features = feats[f].data();
00790                           for(int k=0; k<feats[f].length(); k++)
00791                           { 
00792                                   
00793                               whids[f](j, current_features[k]) -= log_softmax_gradient + (lambda != 0 ? lambda_times_2*whids[f](j,current_features[k]) : 0);                          
00794                           }
00795                           if(auxiliary_task_train_set && use_thetas_for_hidden_weights && f<vhids.length())
00796                           {
00797                               // Update v
00798                               for(int l=0; l<50; l++)  {
00799                                   vhids[f](j, l) -= log_softmax_gradient*thetahids_times_x(l,f);
00800                               }
00801                           }
00802                       }
00803                   }
00804               }
00805                   
00806           }
00807           else
00808           {
00809               for(int f=0; f<feats.length(); f++)
00810               {
00811                   current_features = feats[f].data();
00812                   for(int i=0; i<nout; i++) 
00813                   {                                        
00814                       // Update w
00815                       for(int j=0; j<feats[f].length(); j++)  {
00816                           if(i!=target[2])  {
00817                               ws[f](i, current_features[j]) -= bad_class_softmax_gradient[i] + (lambda != 0 ? lambda_times_2*ws[f](i, current_features[j]) : 0);
00818                           }
00819                           else  {
00820                               ws[f](i, current_features[j]) -= good_class_softmax_gradient[i] + (lambda != 0 ? lambda_times_2*ws[f](i, current_features[j]) : 0);
00821                           }
00822                       }                                          
00823                       if(auxiliary_task_train_set && use_thetas_for_output_weights && ((!separate_features && f==0) || (separate_features && f<thetas.length())))
00824                       {
00825                           // Update v
00826                           for(int j=0; j<50; j++)  {
00827                               if(i!=target[2])  {
00828                                   vs[f](i, j) -=  bad_class_softmax_gradient[i]*thetas_times_x(j,f);
00829                               }
00830                               else  {
00831                                   vs[f](i, j) -= good_class_softmax_gradient[i]*thetas_times_x(j,f);
00832                               }
00833                           }
00834                       }
00835                           
00836                   }
00837               }
00838           }
00839           //p.end("Main update");
00840       }
00841       
00842       /*
00843         if(nhidden>0)
00844         {
00845         // Output weights update
00846         for(int f=0; f<(separate_features ? feats.length() : 1); f++)
00847         {
00848         for(int i=0; i<nout; i++) 
00849         {                                        
00850         // Update w
00851         for(int j=0; j<nhidden+1; j++)  {
00852         if(i!=target[2])  {
00853         ws[f](i, j) -= learning_rate*output[i]*activations(j,f) + (lambda != 0 ? 2*lambda*ws[f](i,j) : 0);
00854         }
00855         else  {
00856         ws[f](i, j) -= learning_rate*(output[i]-1)*activations(j,f) + (lambda != 0 ? 2*lambda*ws[f](i, j) : 0);
00857         }
00858         } 
00859         if(auxiliary_task_train_set && ((!separate_features && f==0) || (separate_features && f<thetas.length())))
00860         {
00861         // Update v
00862         for(int j=0; j<50; j++)  {
00863         if(i!=target[2])  {
00864         vs[f](i, j) -= learning_rate*output[i]*thetas_times_x(j,f);
00865         }
00866         else  {
00867         vs[f](i, j) -= learning_rate*(output[i]-1)*thetas_times_x(j,f);
00868         }
00869         }
00870         }
00871         }
00872         }
00873 
00874         // Hidden weights update
00875         for(int f=0; f<feats.length(); f++)
00876         {
00877         current_features = feats[f].data();
00878         for(int i=0; i<nout; i++) {
00879         for(int j=0; j<nhidden; j++)  
00880         {                          
00881         for(int k=0; k<feats[f].length(); k++)
00882         {                              
00883         if(i!=target[2])  {
00884         if(separate_features) whids[f](j, current_features[k]) -= learning_rate*output[i]*ws[f](i,j)*(1-mypow(activations(j,f),2)) + (lambda != 0 ? 2*lambda*whids[f](j,current_features[k]) : 0);
00885         else whids[f](j, current_features[k]) -= learning_rate*output[i]*ws[0](i,j)*(1-mypow(activations(j,0),2)) + (lambda != 0 ? 2*lambda*whids[f](j,current_features[k]) : 0);
00886         }
00887         else  {
00888         if(separate_features) whids[f](j, current_features[k]) -= learning_rate*(output[i]-1)*ws[f](i,j)*(1-mypow(activations(j,f),2)) + (lambda != 0 ? 2*lambda*whids[f](j,current_features[k]) : 0);
00889         else whids[f](j, current_features[k]) -= learning_rate*(output[i]-1)*ws[0](i,j)*(1-mypow(activations(j,0),2)) + (lambda != 0 ? 2*lambda*whids[f](j,current_features[k]) : 0);
00890         }
00891         }
00892         if(auxiliary_task_train_set && ((!separate_features && f==0) || (separate_features && f<thetahids.length())))
00893         {
00894         // Update v
00895         for(int j=0; j<50; j++)  {
00896         if(i!=target[2])  {
00897         vhids[f](i, j) -= learning_rate*output[i]*ws[f](i,j)*(1-mypow(activations(j,f),2))*thetahids_times_x(j,f);
00898         }
00899         else  {
00900         vhids[f](i, j) -= learning_rate*(output[i]-1)*ws[f](i,j)*(1-mypow(activations(j,f),2))*thetahids_times_x(j,f);
00901         }
00902         }
00903         }
00904         }
00905         }
00906         }
00907                   
00908         }
00909         else
00910         {
00911 
00912         for(int f=0; f<feats.length(); f++)
00913         {
00914         current_features = feats[f].data();
00915         for(int i=0; i<nout; i++) 
00916         {                                        
00917         // Update w
00918         for(int j=0; j<feats[f].length(); j++)  {
00919         if(i!=target[2])  {
00920         ws[f](i, current_features[j]) -= learning_rate*output[i] + (lambda != 0 ? 2*lambda*ws[f](i, current_features[j]) : 0);
00921         }
00922         else  {
00923         ws[f](i, current_features[j]) -= learning_rate*(output[i]-1) + (lambda != 0 ? 2*lambda*ws[f](i, current_features[j]) : 0);
00924         }
00925         }                                                             
00926 
00927         if(auxiliary_task_train_set && ((!separate_features && f==0) || (separate_features && f<thetas.length())))
00928         {
00929         // Update v
00930         for(int j=0; j<50; j++)  {
00931         if(i!=target[2])  {
00932         vs[f](i, j) -= learning_rate*output[i]*thetas_times_x(j,f);
00933         }
00934         else  {
00935         vs[f](i, j) -= learning_rate*(output[i]-1)*thetas_times_x(j,f);
00936         }
00937         }
00938         }
00939         }
00940         }
00941         }
00942         }
00943       */
00944       ++stage;
00945       train_stats->finalize(); // finalize statistics for this epoch
00946       
00947   }
00948   //p.end("All train");
00949 
00950   //p.report(cout);
00951 }
00952 
00953 void StructuralLearner::test(VMat testset, PP<VecStatsCollector> test_stats, 
00954                     VMat testoutputs, VMat testcosts) const
00955 {
00956     int l = testset.length();
00957 
00958     PP<ProgressBar> pb;
00959     if(report_progress) 
00960         pb = new ProgressBar("Testing learner",l);
00961 
00962     if (l == 0) {
00963         // Empty test set: we give -1 cost arbitrarily.
00964         costs.fill(-1);
00965         test_stats->update(costs);
00966     }
00967 
00968     // TODO: VITERBI!!!! This is cheating!!!
00969     for(int i=0; i<l; i++)
00970     {
00971         testset.getExample(i, input, target, weight);
00972       
00973         computeFeatures(input,target,-1,i,feats);
00974         computeOutputWithFeatures(feats,output,auxiliary_task_train_set);
00975         computeCostsFromOutputs(input,output,target,costs);
00976         //computeOutputAndCosts(input,target,output,costs);
00977 
00978         // TODO: update dynamic feature
00979         //updateDynamicFeatures(token_prediction_train,input[3*2],target[2]);
00980 
00981         if(testoutputs)
00982             testoutputs->putOrAppendRow(i,output);
00983 
00984         if(testcosts)
00985             testcosts->putOrAppendRow(i, costs);
00986 
00987         if(test_stats)
00988             test_stats->update(costs,weight);
00989 
00990         if(report_progress)
00991             pb->update(i);
00992     }
00993 
00994     // *** Test procedure using Viterbi decoding
00995     // A row's cell encodes for each current tag and previous tag the best score to get there.
00996     // a row index is computed as (tag-0) * nout + (tag-1)
00997     // Not yet functional
00998 // todo consider sentences independently (or watch out for underflow)
00999     // being at j means predicting class "j/nout" when the previous prediction is "j%nout"
01000     // we must look at all the possibilities for prediction predAtMinus2 and find the best
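// Worked example of the indexing: with nout = 5 tags, cell j = 12 of a row stands for
// current tag 12/5 = 2 with previous tag 12%5 = 2; the successor cell when predicting
// tag k next is j/nout + k*nout, i.e. the current tag becomes the new "previous" tag.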
01001 
01002     if(false)
01003     {
01004         // *** Table width is nout^2 - index is computed as (tag-0) * nout + (tag-1)
01005         int nout = outputsize();
01006         viterbi_table.resize(100, nout*nout); // HACK - assuming a sentence is not over 100 words
01007 
01008         real neg_log_seq_output;
01009         int index;
01010 
01011         // ** Go through all examples
01012         int i=0;    // index on the test examples
01013         int ii=0;   // index on the current sentence's test examples
01014         int iim1;   // ii minus 1
01015 
01016         while (i<l) // ie while we still have examples decode a sentence
01017         {
01018 
01019           // * A) Start of a sentence -> predictions independent of 2 previous tags
01020 
01021           testset.getExample(i, input, target, weight);
01022           preds.fill(MISSING_VALUE);
01023           computeFeatures(input,preds,0,0,feats);
01024           computeOutputWithFeatures(feats,output);
01025           ii=0;   // reposition current sentence index
01026 
01027           // - Sanity check - really BOS?
01028           // HACK input[0] and input[7] are the left context wordtags 
01029           PA_DEBUG( if( !( is_missing(input[0]) && is_missing(input[7]) ) ) cerr << __FILE__ << __LINE__ << "error - not a BOS!" <<endl;)
01030 
01031           // first row
01032           for(int j=0; j<viterbi_table.width(); j++)
01033           {
01034               viterbi_table(ii,j).first = -safeflog( output[j/nout] );
01035               viterbi_table(ii,j).second = -1;
01036           }
01037 
01038           // * B) while next word is not BOS
01039           // Could also use "." if(viterbi_table(i-1,j).second/nout == index_dot)
01040 
01041           while( !( is_missing(input[0]) && is_missing(input[7]) ) )  {
01042 
01043             i++;
01044             ii++;
01045             iim1=ii-1;
01046 
01047             testset.getExample(i, input, target, weight);
01048             preds.fill(MISSING_VALUE);
01049             computeFeatures(input,preds,0,0,feats);
01050 
01051             // use previous row entries to compute the current one's
01052             // TODO save a couple ops by segmenting this in two loops
01053             for(int j=0; j<viterbi_table.width(); j++)
01054             {
01055               // Set previous predictions
01056               if( i>1 )  {
01057                 preds[0] = j%nout;
01058               } else  {
01059                 preds[0] = MISSING_VALUE;
01060               }
01061               preds[1] = j/nout;
01062 
01063               updateFeatures(input,preds,feats);
01064               computeOutputWithFeatures(feats,output);
01065 
01066               // this left prediction context has nout possible current predictions
01067               for( int k=0; k<nout; k++)
01068               {
01069                   index = j/nout + k*nout;  // current row index predicting k with p-1 = j/nout
01070 
01071                   neg_log_seq_output = (-safeflog(output[k]) + viterbi_table(iim1,j).first*iim1)/(ii); // score of predicting k with p-2 = j%nout and p-1 = j/nout
01072 
01073                   if(viterbi_table(ii,index).first > neg_log_seq_output)
01074                   {
01075                       viterbi_table(ii,index).first = neg_log_seq_output;
01076                       viterbi_table(ii,index).second = j;
01077                   }
01078               }
01079             } // for the previous row's elements
01080           } //while haven't reached a new sentence
01081 
01082           // * C) Decode from table
01083           // 1) search last row for best score
01084           real best_score = viterbi_table(ii,0).first;
01085           int best_index = 0;
01086 
01087           for(int j=1; j<viterbi_table.width(); j++)
01088           {
01089             if( viterbi_table(ii,j).first < best_score )  {
01090               best_score = viterbi_table(ii,j).first;
01091               best_index = j;
01092             }
01093           }
01094 
01095           // 2) Retrace best tags - will be in reversed order
01096           vector<int> v_predictions_r;
01097 
01098           for(int iii=ii; iii >=0; iii--)  {
01099             v_predictions_r.push_back( best_index/nout );
01100             best_index = viterbi_table(iii,best_index).second;
01101           }
01102 
01103           vector<int> v_predictions;
01104 
01105           vector<int>::reverse_iterator ritr = v_predictions_r.rbegin();
01106           while( ritr != v_predictions_r.rend() )  {
01107             v_predictions.push_back( *ritr );
01108             ritr++;
01109           }
01110 
01111           // 3) Compute cost - TODO needs some nicer coding
01112           for(int j=i-ii; j<i; j++)
01113           {
01114             testset.getExample(j, input, target, weight);
01115 
01116             int jj=j-(i-ii);
01117 /*
01118             // Set previous predictions
01119             if( jj>1 )  {
01120               preds[0] = %nout;
01121             } else  {
01122               preds[0] = MISSING_VALUE;
01123             }
01124             if 
01125             preds[1] = j/nout;
01126 
01127 
01128               computeFeatures(input,target,-1,i,feats);
01129               computeOutputWithFeatures(feats,output,auxiliary_task_train_set);
01130               computeCostsFromOutputs(input,output,target,costs);
01131               //computeOutputAndCosts(input,target,output,costs);
01132         
01133               // TODO: update dynamic feature
01134               //updateDynamicFeatures(token_prediction_train,input[3*2],target[2]);
01135         
01136               if(testoutputs)
01137                   testoutputs->putOrAppendRow(i,output);
01138         
01139               if(testcosts)
01140                   testcosts->putOrAppendRow(i, costs);
01141         
01142               if(test_stats)
01143                   test_stats->update(costs,weight);
01144         
01145               if(report_progress)
01146                   pb->update(i);*/
01147           }
01148 
01149 
01150           // Go to next sentence
01151           v_predictions.clear();
01152           v_predictions_r.clear();
01153           i++;
01154 
01155 
01156           // Decode from table
01157   /*      for(int i=0; i<l; i++)
01158         {
01159             if(testoutputs)
01160                 testoutputs->putOrAppendRow(i,output);
01161 
01162             if(testcosts)
01163                 testcosts->putOrAppendRow(i, costs);
01164 
01165             if(test_stats)
01166                 test_stats->update(costs,weight);
01167 
01168             if(report_progress)
01169                 pb->update(i);
01170         }
01171 */
01172 
01173 
01174 /*        // *** Fill first row
01175 
01176         testset.getExample(0, input, target, weight);
01177         preds.fill(MISSING_VALUE);
01178         computeFeatures(input,preds,0,0,feats);
01179         computeOutputWithFeatures(feats,output);
01180 
01181         for(int j=0; j<viterbi_table.width(); j++)
01182         {
01183             viterbi_table(0,j).first = -safeflog(output[j/nout]);
01184             viterbi_table(0,j).second = -1;
01185         }
01186 
01187 
01188         // Compute table
01189         for(int i=1; i<l; i++)
01190         {
01191             testset.getExample(i, input, target, weight);
01192             computeFeatures(input,preds,0,0,feats);
01193             for(int j=0; j<viterbi_table.width(); j++)
01194             {
01195                 if( i>1) preds[0] = j%nout;
01196                 else preds[0] = MISSING_VALUE;
01197                 preds[1] = j/nout;                                          //!!!!!!!!!!
01198                 // Take into account "."
01199                 // if(viterbi_table(i-1,j).second/nout == index_dot)
01200                 updateFeatures(input,preds,feats);
01201                 computeOutputWithFeatures(feats,output);
01202                 for( int k=0; k<nout; k++)
01203                 {
01204                     index = j/nout + k*nout;
01205                     neg_log_seq_output = (-safeflog(output[k]) + viterbi_table(i-1,j).first*i)/(i+1);
01206                     if(viterbi_table(i,index).first > neg_log_seq_output)
01207                     {
01208                         viterbi_table(i,index).first = neg_log_seq_output;
01209                         viterbi_table(i,index).second = j;
01210                     }
01211                 }
01212             }
01213         }
01214 
01215         // Decode from table
01216         for(int i=0; i<l; i++)
01217         {
01218             if(testoutputs)
01219                 testoutputs->putOrAppendRow(i,output);
01220 
01221             if(testcosts)
01222                 testcosts->putOrAppendRow(i, costs);
01223 
01224             if(test_stats)
01225                 test_stats->update(costs,weight);
01226 
01227             if(report_progress)
01228                 pb->update(i);
01229         }*/
01230     } // while still examples
01231   } // if viterbi decoding
01232 
01233 }
01234 
01235 void StructuralLearner::computeOutputWithFeatures(TVec<TVec<unsigned int> >& feats, Vec& output, bool use_theta, int begin_class, int end_class) const
01236 {
01237     if(begin_class < 0) begin_class = 0;
01238     if(end_class < 0) end_class = output.length();
01239     /*
01240     if(only_this_class < 0) 
01241     {
01242         output.resize(ws[0].length());
01243         before_softmax.resize(ws[0].length());
01244     }
01245     else 
01246     {
01247         output.resize(1);
01248         before_softmax.resize(1);
01249     }
01250     */
01251     for(int i=0; i<before_softmax.length(); i++) {
01252         before_softmax[i] = 0;
01253     }
01254 
01255     // TODO: computations with Neural Network
01256 
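    // Both branches below accumulate, for each class i in [begin_class, end_class),
    //   before_softmax[i] = sum_f ( ws[f](i,:) . h_f  +  vs[f](i,:) . thetas_times_x(:,f) )
    // where h_f is either feature group f itself (sparse, index-encoded) or, when
    // nhidden > 0, the tanh hidden activations computed from it; the theta terms are
    // only added when use_theta is true and the corresponding structural parameters exist.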
01257     if(nhidden > 0)
01258     {
01259 
01260         if(use_theta && use_thetas_for_hidden_weights)
01261         {
01262             fl = 0;
01263             // compute theta * x
01264             thetahids_times_x.clear();
01265             for(int f=0; f<(separate_features ? thetahids.length() : feats.length()); f++)
01266             {
01267                 current_features = feats[f].data();
01268                 for(int j=0; j<50; j++)
01269                 {
01270                     for(int k=0; k<feats[f].length(); k++)
01271                         if(separate_features)
01272                             thetahids_times_x(j,f) += thetahids[f](j,current_features[k]);
01273                         else
01274                             thetahids_times_x(j,0) += thetahids[0](j,current_features[k]+fl);
01275                 }
01276                 fl += whids[f].width();
01277             }
01278         }
01279 
01280 
01281         activations.clear();
01282         activations.lastRow().fill(1.0);
01283         for(int f=0; f<feats.length(); f++)
01284         {
01285             current_features = feats[f].data();
01286             for(int i=0; i<nhidden; i++) {
01287                 for(int j=0; j<feats[f].length(); j++)  {
01288                     if(separate_features)
01289                         activations(i,f) += whids[f](i, current_features[j]);
01290                     else
01291                         activations(i,0) += whids[f](i, current_features[j]);
01292                 }
01293                 if(use_theta && use_thetas_for_hidden_weights && ((!separate_features && f==0) || (separate_features && f<thetahids.length())))
01294                     for(int ii=0; ii<50; ii++) {
01295                         activations(i,f) += vhids[f](i, ii)*thetahids_times_x(ii,f);
01296                     }        
01297                 if(separate_features)
01298                     activations(i,f) = tanh(activations(i,f));
01299             }
01300         }
01301 
01302         if(!separate_features)
01303             for(int i=0; i<nhidden; i++)
01304                 activations(i,0) = tanh(activations(i,0));
01305 
01306         if(use_theta && use_thetas_for_output_weights)
01307         {
01308             // compute theta * x
01309             thetas_times_x.clear();
01310             for(int f=0; f< thetas.length(); f++)
01311             {
01312                 for(int j=0; j<50; j++)
01313                 {
01314                     for(int k=0; k<nhidden+1; k++)
01315                         thetas_times_x(j,f) += thetas[f](j,k)*activations(k,f);
01316                 }
01317             }
01318             
01319         }
01320         
01321         for(int f=0; f<(separate_features ? feats.length() : 1); f++)
01322         {
01323             //if(only_this_class < 0)
01324             //{
01325                 for(int i=begin_class; i<end_class; i++) {
01326                     for(int j=0; j<nhidden+1; j++)  {
01327                         before_softmax[i] += ws[f](i, j) * activations(j,f); 
01328                     }
01329                     if(use_theta && use_thetas_for_output_weights && ((!separate_features && f==0) || (separate_features && f<thetas.length())))
01330                         for(int ii=0; ii<50; ii++) {
01331                             before_softmax[i] += vs[f](i, ii)*thetas_times_x(ii,f);
01332                         }        
01333                 }
01334                 /*
01335             }
01336             else
01337             {
01338                 for(int j=0; j<nhidden; j++)  {
01339                     before_softmax[0] += ws[f](only_this_class, j) * activations(j,f); 
01340                 }
01341                 if(use_theta)
01342                     for(int ii=0; ii<50; ii++) {
01343                         before_softmax[0] += vs[f](only_this_class, ii)*thetas_times_x(ii,f);
01344                     }       
01345             }
01346                 */
01347         }
01348     }
01349     else
01350     {
01351         if(use_theta && (use_thetas_for_output_weights || use_thetas_for_hidden_weights))
01352         {
01353             fl = 0;
01354             // compute theta * x
01355             thetas_times_x.clear();
01356             for(int f=0; f<(separate_features ? thetas.length() : feats.length() ); f++)
01357             {
01358                 current_features = feats[f].data();
01359                 for(int j=0; j<50; j++)
01360                 {
01361                     for(int k=0; k<feats[f].length(); k++)
01362                         if(separate_features)
01363                             thetas_times_x(j,f) += thetas[f](j,current_features[k]);
01364                         else
01365                             thetas_times_x(j,0) += thetas[0](j,current_features[k]+fl);
01366                 }
01367                 fl += ws[f].width();
01368             }
01369         }
01370         
01371         for(int f=0; f<feats.length(); f++)
01372         {
01373             current_features = feats[f].data();
01374             //if(only_this_class < 0)
01375             //{
01376                 for(int i=begin_class; i<end_class; i++) {
01377                     for(int j=0; j<feats[f].length(); j++)  {
01378                         before_softmax[i] += ws[f](i, current_features[j]);
01379                     }
01380                     if(use_theta && use_thetas_for_output_weights && ((!separate_features && f==0) || (separate_features && f<thetas.length())))
01381                         for(int ii=0; ii<50; ii++) {
01382                             before_softmax[i] += vs[f](i, ii) * thetas_times_x(ii,f) ;
01383                         }        
01384                 }
01385                 /*
01386             }
01387             else
01388             {
01389                 for(int j=0; j<feats[f].length(); j++)  {
01390                     before_softmax[0] += ws[f](only_this_class, current_features[j]);
01391                 }
01392                 if(use_theta && (separate_features || f==0))
01393                     for(int ii=0; ii<50; ii++) {
01394                         before_softmax[0] += vs[f](only_this_class, ii) * thetas_times_x(ii,f) ;
01395                     }
01396             }
01397                 */
01398         }
01399 
01400     }
01401 
01402     //if(only_this_class < 0)
01403     if(begin_class != 0 || end_class != output.length())
01404         softmax(before_softmax.subVec(begin_class,end_class-begin_class),output.subVec(begin_class,end_class-begin_class));
01405     else
01406         softmax(before_softmax,output);
01407 }
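// ---------------------------------------------------------------------------
// Hedged sketch (not part of StructuralLearner.cc): with nhidden <= 0 and no
// theta terms, the scoring above reduces to summing, for each class, one
// weight entry per active (one-hot) feature index and then applying a
// softmax.  The stand-alone helper below illustrates that reduction with
// plain std::vector containers instead of PLearn's Vec/Mat types; names are
// illustrative only.
#include <cmath>
#include <vector>

std::vector<double> sparse_linear_softmax(
        const std::vector< std::vector<double> >& w,        // w[class][feature index]
        const std::vector<unsigned int>& active_features)   // indices of features equal to 1
{
    std::vector<double> out(w.size(), 0.0);
    if (out.empty())
        return out;

    for (size_t c = 0; c < w.size(); ++c)                   // before_softmax accumulation
        for (size_t k = 0; k < active_features.size(); ++k)
            out[c] += w[c][active_features[k]];

    double m = out[0];                                       // numerically stable softmax
    for (size_t c = 1; c < out.size(); ++c) if (out[c] > m) m = out[c];
    double z = 0.0;
    for (size_t c = 0; c < out.size(); ++c) { out[c] = std::exp(out[c] - m); z += out[c]; }
    for (size_t c = 0; c < out.size(); ++c) out[c] /= z;
    return out;
}
// ---------------------------------------------------------------------------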
01408 
01409 
01410 void StructuralLearner::computeOutput(const Vec& input, Vec& output) const
01411 {
01412     PLERROR("In StructuralLearner::computeOutput(): not implemented");
01413 }    
01414 
01415 void StructuralLearner::computeCostsFromOutputs(const Vec& input, const Vec& output, 
01416                                            const Vec& target, Vec& costs) const
01417 {   
01418     // Compute the costs from *already* computed output.    
01419     int argout;
01420     real output_index_O = output[index_O];
01421     if(index_O < 0 || output_index_O > abstention_threshold)
01422         argout = argmax(output);
01423     else
01424     {
01425         output[index_O] = -1;
01426         argout = argmax(output);
01427         output[index_O] = output_index_O;
01428     }
01429     costs[0] = -safeflog( output[(int)target[2]] );
01430     costs[1] = argout == target[2] ? 0 : 1; //class_error(output,target);
01431     if(argout != index_O) costs[2] = costs[1];
01432     else costs[2] = MISSING_VALUE;
01433     if(target[2] != index_O) costs[3] = costs[1];
01434     else costs[3] = MISSING_VALUE;     
01435 }
01436 
01437 TVec<string> StructuralLearner::getTestCostNames() const
01438 {
01439     // Return the names of the costs computed by computeCostsFromOutputs
01440     // (these may or may not be exactly the same as what's returned by getTrainCostNames).
01441     TVec<string> ret;
01442     ret.resize(4);
01443     ret[0] = "NLL";
01444     ret[1] = "class_error";
01445     ret[2] = "precision";
01446     ret[3] = "recall";
01447     return ret;
01448 }
01449 
01450 TVec<string> StructuralLearner::getTrainCostNames() const
01451 {
01452     // Return the names of the objective costs that the train method computes and 
01453     // for which it updates the VecStatsCollector train_stats
01454     // (these may or may not be exactly the same as what's returned by getTestCostNames).
01455     TVec<string> ret;
01456     ret.resize(4);
01457     ret[0] = "NLL";
01458     ret[1] = "class_error";
01459     ret[2] = "precision";
01460     ret[3] = "recall";
01461     return ret;
01462 }
01463 
01486 void StructuralLearner::computeFeatures(const Vec& input, const Vec& target, int data_set, int index, TVec< TVec<unsigned int> >& theFeatureGroups, char
01487 featureMask) const 
01488 {
01489     
01490 
01491     fl=0;               // length of the onehot encoded features (stands for "features' length")
01492 
01493     // We have 6 feature groups
01494     theFeatureGroups.resize(6);
01495     fls.resize(6);
01496 
01497     // *** Wordtag features ***
01498     // Wordtags in a 5 word window with a onehot encoding
01499     // Derived from the wordtags input[0], input[7], input[14], input[21],
01500     // input[28]
01501     currentFeatureGroup = theFeatureGroups[0];
01502     currentFeatureGroup.resize(6);
01503     size = 0;
01504     for(int i=0, ii=0; i<5; i++)  {
01505         ii=7*i;
01506         
01507         if( featureMask & (1<<i) ) {        // we are doing this test often, but it should be quick enough. If need be we'll optimize the function
01508             if( !is_missing(input[ii]) ) {
01509                 currentFeatureGroup[size] = (unsigned int)(fl + input[ii]);
01510                 size++;
01511             }   
01512             // I don't think having a feature for missing value will help...
01513             /*
01514               else      {
01515               currentFeatureGroup.push_back( fl + (train_set->getDictionary(ii))->size() + 1 );  // explicitly say it's missing
01516               }
01517               fl += (train_set->getDictionary(ii))->size()+2; // +1 for OOV and +1 for missing
01518             */
01519         }
01520 
01521         fl += (train_set->getDictionary(ii))->size()+1;
01522     }//for wordtags
01523 
01524     // For the bias!!!
01525     currentFeatureGroup[size] = fl;
01526     size++; 
01527     fl++; 
01528     fls[0] = fl;
01529     theFeatureGroups[0].resize(size);
01530 
01531 
01532     // *** Prefix features ***
01533     // Prefix features - prefix tag 
01534     // Derived from input[1], input[8], input[15], input[22], input[29]
01535     currentFeatureGroup = theFeatureGroups[1];
01536     currentFeatureGroup.resize(5);
01537     size = 0;   
01538     fl=0;
01539     for(int i=0, ii=0; i<5; i++)  {        
01540         ii=7*i+1;
01541         
01542         if( featureMask & (1<<i) ) {        // we are doing this test often, but it should be quick enough. If need be we'll optimize the function
01543             // Prefix tag is not missing, look at it
01544             if( !is_missing(input[ii]) ) {
01545                 currentFeatureGroup[size] = (unsigned int)(fl + input[ii]);
01546                 size++;
01547             }
01548         }    
01549         fl += (train_set->getDictionary(ii))->size()+1;
01550     }//for 5 word window
01551     theFeatureGroups[1].resize(size);
01552     fls[1] = fl;
01553 
01554 
01555     // *** Suffix features ***
01556     // Suffix features - suffix tags
01557     // Derived from input[2], input[9], ... 
01558     currentFeatureGroup = theFeatureGroups[2];
01559     currentFeatureGroup.resize(5);
01560     size = 0;   
01561     fl=0;
01562     for(int i=0, ii=0; i<5; i++)  
01563     {
01564         ii=7*i+2;
01565                 
01566         if( featureMask & (1<<i) ) {        // we are doing this test often, but it should be quick enough. If need be we'll optimize the function
01567             // Suffix tag is not missing, look at it
01568             if( !is_missing(input[ii]) ) {
01569                 currentFeatureGroup[size] = (unsigned int)(fl + input[ii]);
01570                 size++;
01571             }
01572         }    
01573         fl += (train_set->getDictionary(ii))->size()+1;
01574     }//for 5 word window
01575     theFeatureGroups[2].resize(size);
01576     fls[2] = fl;
01577 
01578 
01579     // *** Char type features ***
01580     // Char type features in a 5 word window - 4 features (1 if true, 0 if not):
01581     //          -1st letter capitalized
01582     //          -All letters capitalized
01583     //          -All digits
01584     //          -All digits and '.'  ','
01585     // Explicit from input[3], input[4], input[5], input[6], input[10], ...
01586 
01587     currentFeatureGroup = theFeatureGroups[3];
01588     currentFeatureGroup.resize(20);
01589     size = 0;
01590     fl = 0;
01591     for(int i=0, ii=0; i<5; i++)  {
01592         ii=7*i+3;
01593         
01594         if( featureMask & (1<<i) ) {        // we are doing this test often, but it should be quick enough. If need be we'll optimize the function
01595             // for 4 features
01596             for(int j=0; j<4; j++)  {
01597                 // feature not missing
01598                 if( !is_missing(input[ii]) ) {
01599                     // feature active
01600                     if(input[ii]==1)    {
01601                         currentFeatureGroup[size] = (unsigned int)(fl);
01602                         size++;
01603                     }
01604                 }      
01605                 fl++;
01606                 ii++;
01607             }
01608         }   else    {
01609             fl = fl+4;
01610         }
01611     }//for 5 word window
01612     theFeatureGroups[3].resize(size);
01613     fls[3] = fl;
01614 
01615     // *** "Bag of words in a 3 syntactic chunk window" features ***
01616     // we have this from preprocessing
01617 /*
01618     currentFeatureGroup = theFeatureGroups[5];
01619     currentFeatureGroup.resize(0);
01620     size = 0;   
01621     fl=0;
01622     // TODO: fetch correct wordsIn3SyntacticContext Vec, depending
01623     //       on the values of data_set and index
01624     //for(int i=0; i<wordsIn3SyntacticContext.length(); i++)    {
01625     //currentFeatureGroup.push_back(wordsIn3SyntacticContext[i]);
01626     //}
01627     theFeatureGroups[5].resize(size);
01628     fls[5] = fl;
01629 */
01630 
01631     // *** Label features ***
01632     // Labels of the 2 words on the left - should always be in the target (if we are decoding, then the target
01633     // should hold what we have predicted so far)
01634     currentFeatureGroup = theFeatureGroups[4];
01635     currentFeatureGroup.resize(2);
01636     size = 0;   
01637     fl = 0;
01638     // Hugo: we don't use the tag features for auxiliary task???
01639     if( featureMask & 1 ) {       
01640         if( !is_missing(target[0]) ) {
01641             currentFeatureGroup[size] = fl + (int)target[0];  // indexed assignment, consistent with the other groups (push_back would grow past the resize(2) above)
01642             size++;
01643         }
01644     }
01645     fl += (train_set->getDictionary(inputsize_))->size()+1;
01646         
01647     // Hugo: idem
01648     if( featureMask & 2) {       
01649         if( !is_missing(target[1]) ) {
01650             currentFeatureGroup[size] = fl + (int)target[1];
01651             size++;
01652         }
01653     }
01654     fl += (train_set->getDictionary(inputsize_))->size()+1;
01655     theFeatureGroups[4].resize(size);
01656     fls[4] = fl;
01657 
01658     // *** Bigrams of current token and label on the left
01659     currentFeatureGroup = theFeatureGroups[5];
01660     currentFeatureGroup.resize(1);
01661     fl = 0;
01662     size=0;
01663   
01664     // Hugo: idem!!!
01665     // if neither of the 2 is masked then we'll compute the feature
01666     if( (featureMask & 2) && (featureMask & 4) ) {
01667         if( !is_missing(target[1]) && !is_missing(input[14]) ) {
01668           int bigram = (int)target[1] * ((train_set->getDictionary(0))->size()+1) + (int)input[14];
01669           std::map<int, int>::iterator itr_plcw_bigram_mapping;
01670 
01671           // is it in our mapping of bigrams seen in train_set?
01672           itr_plcw_bigram_mapping = plcw_bigram_mapping.find( bigram );
01673 
01674           if( itr_plcw_bigram_mapping != plcw_bigram_mapping.end() )  {
01675             currentFeatureGroup[size] = itr_plcw_bigram_mapping->second;
01676             size++;
01677           }
01678         }
01679     } 
01680     fl += plcw_bigram_mapping.size();
01681     theFeatureGroups[5].resize(size);
01682     fls[5] = fl;
01683 
01684 
01685     // *** Previous occurrences features ***
01686   /*  // ...
01687         
01688     fl = 0;
01689     size=0;
01690     // Add things here...
01691     theFeatureGroups[8].resize(size);
01692     fls[8] = fl;
01693   */  
01694 }
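// ---------------------------------------------------------------------------
// Hedged sketch (not part of StructuralLearner.cc): each feature group above
// is a sparse one-hot encoding in which the global index of an active feature
// is a running offset `fl` (the sizes of the dictionaries already visited,
// +1 each for OOV) plus the tag value itself.  The toy helper below shows
// that bookkeeping for a window of categorical tags; dictionary sizes and tag
// values are hypothetical.
#include <vector>

// tags[i] is the categorical value at position i (negative stands in for a
// missing value here); dict_sizes[i] is the size of that position's dictionary.
std::vector<unsigned int> onehot_indices(const std::vector<int>& tags,
                                         const std::vector<int>& dict_sizes)
{
    std::vector<unsigned int> active;
    unsigned int fl = 0;                        // running offset into the concatenated one-hot vector
    for (size_t i = 0; i < tags.size(); ++i) {
        if (tags[i] >= 0)
            active.push_back(fl + (unsigned int)tags[i]);
        fl += (unsigned int)dict_sizes[i] + 1;  // +1 for the OOV entry, as in computeFeatures()
    }
    return active;
}
// Example: tags {3, -1, 0} with dict_sizes {5, 5, 7} yields indices {3, 12}.
// ---------------------------------------------------------------------------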
01695 
01712 void StructuralLearner::updateFeatures(const Vec& input, const Vec& target,  TVec< TVec<unsigned int> >& theFeatureGroups, char
01713 featureMask) const
01714 {
01715 
01716     // *** Label features ***
01717     // Labels of the 2 words on the left - should always be in the target (if we are decoding, then the target
01718     // should hold what we have predicted so far)
01719     currentFeatureGroup = theFeatureGroups[4];
01720     currentFeatureGroup.resize(2);
01721     size = 0;
01722     fl = 0;
01723 
01724     // Hugo: we don't use the tag features for auxiliary task???
01725     if( featureMask & 1 ) {       
01726         if( !is_missing(target[0]) ) {
01727             currentFeatureGroup[size] = fl + (int)target[0];  // indexed assignment, consistent with the other groups
01728             size++;
01729         }
01730     }
01731     fl += (train_set->getDictionary(inputsize_))->size()+1;
01732         
01733     // Hugo: idem
01734     if( featureMask & 2) {       
01735         if( !is_missing(target[1]) ) {
01736             currentFeatureGroup[size] = fl + (int)target[1];
01737             size++;
01738         }
01739     }
01740     fl += (train_set->getDictionary(inputsize_))->size()+1;
01741     theFeatureGroups[4].resize(size);
01742     fls[4] = fl;
01743 
01744     // *** Bigrams of current token and label on the left
01745     currentFeatureGroup = theFeatureGroups[5];
01746     currentFeatureGroup.resize(1);
01747     fl = 0;
01748     size=0;
01749   
01750     // Hugo: idem!!!
01751     // if neither of the 2 is masked then we'll compute the feature
01752     if( (featureMask & 2) && (featureMask & 4) ) {      
01753         if( !is_missing(target[1]) && !is_missing(input[14]) ) {
01754 
01755           int bigram = (int)target[1] * ((train_set->getDictionary(0))->size()+1) + (int)input[14];
01756           std::map<int, int>::iterator itr_plcw_bigram_mapping;
01757 
01758           // is it in our mapping of bigrams seen in train_set?
01759           itr_plcw_bigram_mapping = plcw_bigram_mapping.find( bigram );
01760 
01761           if( itr_plcw_bigram_mapping != plcw_bigram_mapping.end() )  {
01762             currentFeatureGroup[size] = itr_plcw_bigram_mapping->second;
01763             size++;
01764           }
01765         }
01766     } 
01767     fl += plcw_bigram_mapping.size();
01768     theFeatureGroups[5].resize(size);
01769     fls[5] = fl;
01770 
01771 
01772 }
01773 
01781 void StructuralLearner::initWordProblemsStructures()
01782 {
01783 
01784   // *** Determine most frequent words
01785   // Just a big frequency array.
01786 
01787   // 1) Create and init the freq table - its size is the vocabulary size +1 for OOV
01788   unsigned long int* frequency;
01789   frequency = new unsigned long int[ (auxiliary_task_train_set->getDictionary(0))->size() + 1];
01790   //memset(frequency, 0, ((train_set->getDictionary(6))->size()+1) * sizeof(unsigned long int) ); 
01791   for(int i=0; i<((auxiliary_task_train_set->getDictionary(0))->size()+1); i++)  {  
01792     frequency[i]=0;
01793   }
01794 
01795   // 2) Compute frequencies
01796   for(int e=0; e<auxiliary_task_train_set->length(); e++)  {
01797     auxiliary_task_train_set->getExample(e, input, target, weight);
01798     frequency[(int)input[14]]++;
01799   }
01800 
01801   // 3) extract most frequent entries -> build a map
01802   // build an STL vector (skipping the OOV entry) and sort it
01803   std::vector<freqCount> tmp;
01804   for(int i=1; i<((auxiliary_task_train_set->getDictionary(0))->size()+1); i++)  {  
01805     tmp.push_back( freqCount(i, frequency[i]) );
01806   }
01807 
01808   delete []frequency;
01809 
01810   // Sort the items in descending order
01811   std::sort(tmp.begin(), tmp.end(), freqCountGT);
01812 
01813   // Build a map from the most frequent words' wordtags to their rank among the most frequent words
01814   std::map<int, int> map_mostFrequentWords;  // word tag is key, value is the net's output for it
01815   std::vector<freqCount>::iterator itr;
01816   int i;
01817   for(i=0, itr=tmp.begin(); itr!=tmp.end() && i<n_auxiliary_wordproblems; itr++, i++) {
01818     map_mostFrequentWords[itr->wordtag] = i; 
01819     //MostFrequentWordsCount+=itr->count;
01820   }
01821 
01822   tmp.clear();
01823 
01824 
01825   // *** Build the TMats for the auxiliary problems
01826   std::map<int, int>::iterator itr_map_mostFrequentWords;
01827   int leftWord_Wordtag, currentWord_Wordtag;
01828   int left_size=0;
01829   int current_size=0;
01830 
01831   auxiliary_indices_left.resize(auxiliary_task_train_set->length(), 2);
01832   auxiliary_indices_current.resize(auxiliary_task_train_set->length(), 2);
01833 
01834   for(int e=0; e<auxiliary_task_train_set->length(); e++)  {
01835     auxiliary_task_train_set->getExample(e, input, target, weight);
01836 
01837     // * if this example has a most frequent word at left
01838     leftWord_Wordtag = (int)input[7];
01839 
01840     itr_map_mostFrequentWords = map_mostFrequentWords.find( leftWord_Wordtag );
01841 
01842     if( itr_map_mostFrequentWords != map_mostFrequentWords.end() )  {
01843         auxiliary_indices_left[left_size][0] = e;
01844         auxiliary_indices_left[left_size][1] = itr_map_mostFrequentWords->second;
01845         left_size++;
01846     }
01847 
01848     // * if this example has a most frequent word at current
01849     currentWord_Wordtag = (int)input[14];
01850 
01851     itr_map_mostFrequentWords = map_mostFrequentWords.find( currentWord_Wordtag );
01852 
01853     if( itr_map_mostFrequentWords != map_mostFrequentWords.end() )  {
01854         auxiliary_indices_current[current_size][0] = e;
01855         auxiliary_indices_current[current_size][1] = itr_map_mostFrequentWords->second;
01856         current_size++;
01857     }
01858 
01859   }// end for auxiliary example
01860 
01861   map_mostFrequentWords.clear();
01862 
01863   auxiliary_indices_left.resize(left_size, 2);
01864   auxiliary_indices_current.resize(current_size, 2);
01865 
01866 }
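// ---------------------------------------------------------------------------
// Hedged sketch (not part of StructuralLearner.cc): the auxiliary problems
// above keep only the n most frequent word tags.  The helper below shows the
// same count / sort / take-top-n idea with standard containers; all names are
// illustrative, and n_top plays the role of n_auxiliary_wordproblems.
#include <algorithm>
#include <map>
#include <utility>
#include <vector>

// wordtags holds values in [0, vocab_size], with 0 reserved for OOV.
std::map<int,int> top_n_classes(const std::vector<int>& wordtags,
                                int vocab_size, int n_top)
{
    std::vector<unsigned long> freq(vocab_size + 1, 0);     // +1 for OOV at index 0
    for (size_t i = 0; i < wordtags.size(); ++i)
        ++freq[wordtags[i]];

    std::vector< std::pair<unsigned long,int> > counts;     // (count, wordtag), skipping OOV
    for (int w = 1; w <= vocab_size; ++w)
        counts.push_back(std::make_pair(freq[w], w));
    std::sort(counts.rbegin(), counts.rend());               // descending by count

    std::map<int,int> wordtag_to_class;                       // wordtag -> auxiliary class index
    for (int i = 0; i < n_top && i < (int)counts.size(); ++i)
        wordtag_to_class[counts[i].second] = i;
    return wordtag_to_class;
}
// ---------------------------------------------------------------------------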
01867 
01875 void StructuralLearner::initPreviousLabelCurrentWordBigramMapping()
01876 {
01877     int bigram;
01878     int currentBigramIndex=0;
01879 
01880     std::map<int, int>::iterator itr_plcw_bigram_mapping;
01881 
01882     // Attribute an index to "previous label - current word" bigrams seen in train_set
01883     for(int e=0; e<train_set->length(); e++)  {
01884         train_set->getExample(e, input, target, weight);
01885 
01886         if( !is_missing(target[1]) && !is_missing(input[14]) ) {
01887             // if no OOV
01888             // Hugo: OOV is not necessarily 0!!!
01889             //       anyway, I think we should consider OOV after all
01890             //if( (target[1] !=((train_set->getDictionary(inputsize_))->oov_tag_id)) && (input[14] != (train_set->getDictionary(0))->oov_tag_id)) )  {
01891             // The bigram 
01892             bigram = (int)target[1] * ((train_set->getDictionary(0))->size()+1) + (int)input[14];
01893 
01894             // if not already there, add it
01895             itr_plcw_bigram_mapping = plcw_bigram_mapping.find( bigram );
01896 
01897             if( itr_plcw_bigram_mapping == plcw_bigram_mapping.end() )  {
01898                 plcw_bigram_mapping[bigram] = currentBigramIndex;
01899                 currentBigramIndex++;                
01900             }
01901         }
01902     }// end for auxiliary example
01903 
01904 }
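// ---------------------------------------------------------------------------
// Hedged sketch (not part of StructuralLearner.cc): a "previous label /
// current word" bigram is packed into a single integer as
//     bigram = prev_label * (word_vocab_size + 1) + current_word
// (the +1 leaves room for OOV), and only bigrams actually seen in train_set
// receive a dense index, assigned in order of first appearance.  The fragment
// below shows that packing and remapping with hypothetical values.
#include <map>

int pack_bigram(int prev_label, int current_word, int word_vocab_size)
{
    return prev_label * (word_vocab_size + 1) + current_word;
}

// Usage (illustrative):
//   std::map<int,int> mapping;
//   int packed = pack_bigram(prev_label, current_word, word_vocab_size);
//   if (mapping.find(packed) == mapping.end()) {
//       int next_index = (int)mapping.size();   // dense index for this new bigram
//       mapping[packed] = next_index;
//   }
// ---------------------------------------------------------------------------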
01905 
01906 
01907 
01908 /*
01909 //PA - need to integrate this
01910 int StructuralLearner::determineWordsIn3SyntacticContext(VMat example_set, TVec< TVec<unsigned int> >& wordsIn3SyntacticContext_set)    {
01911         
01912         TVec< unsigned int > leftSyntacticChunkBagOfWords;
01913         TVec< unsigned int > CurrentSyntacticChunkBagOfWords;
01914         TVec< unsigned int > RightSyntacticChunkBagOfWords;
01915         
01916         TVec< unsigned int > wordsIn3SyntacticContext;
01917 
01918         input[8] is current chunk
01919         
01920         // set currentSyntacticChunk
01921         //compute CurrentSyntacticChunkBagOfWords and RightSyntacticChunkBagOfWords
01922         // then cat into wordsIn3SyntacticContext
01923         
01924         for(int e=0; e<train_set->length(); e++)  {
01925                 train_set->getExample(e, input, target, weight);
01926         
01927                 // We encounter a new chunk
01928                 if( input[8] != currentSyntacticChunk ) {               // input[8] is the current syntactic chunk - never a missing value
01929                         leftSyntacticChunkBagOfWords = CurrentSyntacticChunkBagOfWords;
01930                         CurrentSyntacticChunkBagOfWords = RightSyntacticChunkBagOfWords;
01931                         // set currentSyntacticChunk
01932                         // compute new RightSyntacticChunkBagOfWords
01933                         // readjust wordsIn3SyntacticContext by concatenating all 3 (ensure uniqueness? YES!)
01934                 }
01935                 
01936                 wordsIn3SyntacticContext_set.push_back(wordsIn3SyntacticContext);
01937                 
01938         }//for the examples
01939                 
01940                 
01941         return 0;
01942 }
01943 
01944 */
01945 
01946 
01947 
01949 // initializeParams //
01951 void StructuralLearner::initializeParams(bool set_seed)
01952 {
01953     if (set_seed) {
01954         if (seed_>=0)
01955             manual_seed(seed_);
01956         else
01957             PLearn::seed();
01958     }
01959 
01960   // initialize weights
01961   if (train_set) {
01962     real delta;
01963     int is;
01964 
01965     if(nhidden <= 0)
01966     {
01967         for(int i=0; i<ws.length(); i++) {
01968             ws[i].fill(0.0);
01969         }
01970     }
01971     else
01972     {
01973         is = 0;
01974         for(int i=0; i<ws.length(); i++) {
01975             is += ws[i].size();
01976         }
01977         for(int i=0; i<ws.length(); i++) {
01978             delta = 1.0 / sqrt(real(is));
01979             fill_random_uniform(ws[i], -delta, delta);
01980         }
01981     }
01982 
01983     is = vs.length() * 50;
01984     for(int i=0; i<vs.length(); i++) {
01985         delta = 1.0 / sqrt(real(is));
01986         fill_random_uniform(vs[i], -delta, delta);
01987     }
01988     
01989     is = vhids.length() * 50;
01990     for(int i=0; i<vhids.length(); i++) {
01991         delta = 1.0 / sqrt(real(is));
01992         fill_random_uniform(vhids[i], -delta, delta);
01993     }
01994     
01995     if(nhidden > 0)
01996     {        
01997         for(int i=0; i<whids.length(); i++) 
01998         {
01999             /*
02000               is = whids[i].size();
02001               delta = 1.0 / sqrt(real(is));
02002               fill_random_uniform(whids[i], -delta, delta);
02003             */
02004             whids[i].fill(0.0);
02005         }
02006     }
02007   }
02008 }
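// ---------------------------------------------------------------------------
// Hedged sketch (not part of StructuralLearner.cc): the weight matrices above
// are either zeroed or drawn uniformly from [-delta, delta] with
// delta = 1 / sqrt(n), where n is (roughly) the number of incoming weights, a
// common heuristic that keeps pre-activation variance in check.  A minimal
// stand-alone version with hypothetical names (and std::rand in place of
// PLearn's fill_random_uniform):
#include <cmath>
#include <cstdlib>
#include <vector>

void fill_uniform_fan_in(std::vector<double>& w, int fan_in)
{
    double delta = 1.0 / std::sqrt((double)fan_in);
    for (size_t i = 0; i < w.size(); ++i)
        w[i] = -delta + 2.0 * delta * (std::rand() / (double)RAND_MAX);
}
// ---------------------------------------------------------------------------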
02009 
02010 } // end of namespace PLearn
02011 
02012 
02013 /*
02014   Local Variables:
02015   mode:c++
02016   c-basic-offset:4
02017   c-file-style:"stroustrup"
02018   c-file-offsets:((innamespace . 0)(inline-open . 0))
02019   indent-tabs-mode:nil
02020   fill-column:79
02021   End:
02022 */
02023 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :