// -*- C++ -*-

// MoleculeTemplateLearner.cc
//
// Copyright (C) 2005 Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: .pyskeleton_header 544 2003-09-01 00:05:31Z plearner $
 ******************************************************* */

// Authors: Dan Popovici

#include "MoleculeTemplateLearner.h"
#include "WeightedLogGaussian.h"
#include <plearn/var/ConcatRowsVariable.h>
#include <plearn/var/LiftOutputVariable.h>
#include <plearn/var/NegCrossEntropySigmoidVariable.h>
#include <plearn/var/BinaryClassificationLossVariable.h>
#include <plearn/var/ProductVariable.h>
#include <plearn/var/TimesConstantVariable.h>
#include <plearn/var/Var_utils.h>
#include <plearn/var/MaxVariable.h>
#include "NoBpropVariable.h"

#include <plearn/var/Var.h>
#include <plearn/var/Var_all.h>
#include "plearn/display/DisplayUtils.h"
//#include "linearalign.h"

namespace PLearn {
using namespace std;

void displayVarGr(const Var& v, bool display_values)
{
    displayVarGraph(v, display_values, 0);
}

void displayVarFn(const Func& f, bool display_values)
{
    displayFunction(f, display_values, 0);
}

    MoleculeTemplateLearner::MoleculeTemplateLearner() :
        nhidden(10),
        weight_decay(0),
        noutputs(1),
        batch_size(1),
        scaling_factor(1),
        lrate2(1),
        training_mode(true),
        builded(false)
    {
    }

    PLEARN_IMPLEMENT_OBJECT(
        MoleculeTemplateLearner,
        "Learner that classifies molecules against a set of learned molecule templates",
        "MoleculeTemplateLearner selects seed molecules from the training set,\n"
        "turns them into Gaussian templates (a mean and a per-coordinate\n"
        "deviation), scores every input molecule against each template, and\n"
        "feeds the standardized scores to a small one-hidden-layer network\n"
        "with a sigmoid output for binary activity classification.\n");

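    // A minimal usage sketch in PLearn's script syntax. The option values
    // below are hypothetical placeholders, and any PLearn optimizer can be
    // substituted for the GradientOptimizer shown here:
    //
    //     MoleculeTemplateLearner(
    //         nhidden = 10;
    //         weight_decay = 1e-4;
    //         n_active_templates = 2;
    //         n_inactive_templates = 2;
    //         lrate2 = 1;
    //         optimizer = GradientOptimizer(start_learning_rate = 0.01);
    //         nstages = 50;
    //     );
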
    void MoleculeTemplateLearner::declareOptions(OptionList& ol)
    {
        declareOption(ol, "nhidden", &MoleculeTemplateLearner::nhidden, OptionBase::buildoption,
                "Number of hidden units in the hidden layer (0 means no hidden layer)\n");

        declareOption(ol, "weight_decay", &MoleculeTemplateLearner::weight_decay, OptionBase::buildoption,
                "Coefficient of the L1 penalty applied to the first-layer weights V\n");

        declareOption(ol, "batch_size", &MoleculeTemplateLearner::batch_size, OptionBase::buildoption,
                "How many samples to use to estimate the average gradient before updating the weights\n"
                "0 is equivalent to specifying training_set->length() \n");

        declareOption(ol, "optimizer", &MoleculeTemplateLearner::optimizer, OptionBase::buildoption,
                "Specify the optimizer to use\n");

        declareOption(ol, "n_active_templates", &MoleculeTemplateLearner::n_active_templates, OptionBase::buildoption,
                "Number of active molecules to use as template seeds\n");

        declareOption(ol, "n_inactive_templates", &MoleculeTemplateLearner::n_inactive_templates, OptionBase::buildoption,
                "Number of inactive molecules to use as template seeds\n");

        declareOption(ol, "lrate2", &MoleculeTemplateLearner::lrate2, OptionBase::buildoption,
                "Gradient scaling factor applied to the standardized template scores (via NoBpropVariable)\n");

        declareOption(ol, "training_mode", &MoleculeTemplateLearner::training_mode, OptionBase::buildoption,
                "Whether the learner is built for training (true) or only for testing (false)\n");

        declareOption(ol, "templates", &MoleculeTemplateLearner::templates, OptionBase::learntoption,
                "The learned molecule templates\n");

        declareOption(ol, "paramsvalues", &MoleculeTemplateLearner::paramsvalues, OptionBase::learntoption,
                "The learned parameter vector\n");

        // Now call the parent class' declareOptions
        inherited::declareOptions(ol);
    }

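    // Note on the expected data layout: each row of train_set has width 2,
    // where column 0 is the index of the molecule in the global Molecules
    // vector and column 1 is the binary activity label (1 = active,
    // 0 = inactive); build_() below relies on this convention.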
    void MoleculeTemplateLearner::build_()
    {
        if ((train_set || !training_mode) && !builded) {

            builded = true;

            n_templates = n_active_templates + n_inactive_templates;

            vector<int> id_templates;

            if (training_mode) {
                Molecules.clear();
                Molecule::readMolecules("g1active.txt", Molecules); // TODO: make the file name an option
                n_actives = Molecules.size();
                Molecule::readMolecules("g1inactive.txt", Molecules);

//              n_inactives = Molecules.size() - n_actives ;  // TODO : is needed ??

                set<int> found;
                Vec t(2);

                // find the ids for the active templates
                int nr_find_active = n_active_templates;
                for (int i=0; i<train_set.length(); ++i) {
                    train_set->getRow(i, t);
                    if (nr_find_active > 0 && t[1] == 1 && found.count((int)t[0]) == 0) {
                        nr_find_active--;
                        id_templates.push_back((int)t[0]);
                        found.insert((int)t[0]);
                    }
                    if (nr_find_active == 0) break;
                }

                if (nr_find_active > 0)
                    PLERROR("There are not enough actives in the dataset");

                // find the ids for the inactive templates
                int nr_find_inactive = n_inactive_templates;
                for (int i=0; i<train_set.length(); ++i) {
                    train_set->getRow(i, t);
                    if (nr_find_inactive > 0 && t[1] == 0 && found.count((int)t[0]) == 0) {
                        nr_find_inactive--;
                        id_templates.push_back((int)t[0]);
                        found.insert((int)t[0]);
                    }
                    if (nr_find_inactive == 0) break;
                }

                if (nr_find_inactive > 0)
                    PLERROR("There are not enough inactives in the dataset");
            }

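            // The seed molecules found above become the initial template
            // means; their log-variances start at 0 (i.e. unit variance)
            // and are learned jointly with the network weights below.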
            input_index = Var(1, "input_index");

            mu.resize(n_templates);
            sigma.resize(n_templates);
            sigma_square.resize(n_templates);
            S.resize(n_templates);

            templates.resize(n_templates);

            for (int i=0; i<n_templates; ++i) {

                if (training_mode) {
                    mu[i] = Var(Molecules[id_templates[i]]->chem.length(), Molecules[id_templates[i]]->chem.width(), "Mu");
                    mu[i]->matValue << Molecules[id_templates[i]]->chem;

                    sigma[i] = Var(Molecules[id_templates[i]]->chem.length(), Molecules[id_templates[i]]->chem.width(), "Sigma");
                    sigma[i]->value.fill(0);
                }
                else {
                    mu[i] = Var(templates[i]->chem.length(), templates[i]->chem.width(), "Mu");
                    sigma[i] = Var(templates[i]->dev.length(), templates[i]->dev.width(), "Sigma");
                }

                params.push_back(mu[i]);
                params.push_back(sigma[i]);

                // In training mode sigma holds log-variances, so the variance is
                // obtained through exp; in test mode sigma_square aliases sigma
                // and is exponentiated in place once the saved parameters have
                // been loaded (see below).
                if (training_mode)
                    sigma_square[i] = new ExpVariable(sigma[i]);
                else
                    sigma_square[i] = sigma[i];

//              if (!training_mode) {
//                  sigma_square[i]->value.fill(1) ;
//              }

                if (training_mode) {
                    templates[i] = new Template();
                    templates[i]->chem.resize(mu[i]->matValue.length(), mu[i]->matValue.width());
                    templates[i]->chem << mu[i]->matValue;

                    templates[i]->geom.resize(Molecules[id_templates[i]]->geom.length(), Molecules[id_templates[i]]->geom.width());
                    templates[i]->geom << Molecules[id_templates[i]]->geom;
                    templates[i]->vrml_file = Molecules[id_templates[i]]->vrml_file;
                    templates[i]->dev.resize(sigma_square[i]->matValue.length(), sigma_square[i]->matValue.width());
                    templates[i]->dev << sigma_square[i]->matValue; // TODO: sigma_square may not hold its final value yet
                }
            }

            for (int i=0; i<n_templates; ++i)
                S[i] = new WeightedLogGaussian(training_mode, i, input_index, mu[i], sigma_square[i], templates[i]);

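            // Each S[i] is the match score of the current molecule (selected
            // by input_index) against template i, computed by the custom
            // WeightedLogGaussian Var from the template mean mu[i] and the
            // per-coordinate variance sigma_square[i].
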
            V = Var(nhidden, n_templates, "V");
            V_b = Var(nhidden, 1, "V_b");
//            V_direct = Var(1 , 2  , "V_direct") ;

            mu_S.resize(n_templates);
            sigma_S.resize(n_templates);
            sigma_square_S.resize(n_templates);

            S_after_scaling.resize(n_templates);

            sigma_s_vec.resize(n_templates);

            for (int i=0; i<n_templates; ++i) {
                mu_S[i] = Var(1, 1);
                sigma_S[i] = Var(1, 1);
                if (training_mode)
                    sigma_square_S[i] = new SquareVariable(sigma_S[i]);
                else
                    sigma_square_S[i] = sigma_S[i];

                params.push_back(mu_S[i]);
                params.push_back(sigma_S[i]);
                // standardize the raw score of template i
                S_after_scaling[i] = new DivVariable(S[i] - mu_S[i], sigma_square_S[i]);
            }

            S_std.resize(n_templates);

            for (int i=0; i<n_templates; ++i) {
                // S_std[i] scales the gradient flowing back into the score graph
                S_after_scaling[i] = new NoBpropVariable(S_after_scaling[i], &S_std[i]);
            }

            temp_S = new ConcatRowsVariable(S_after_scaling);
            hl = tanh(product(V, temp_S) + V_b);

            params.push_back(V);
            params.push_back(V_b);
//          params.push_back(V_direct);

            W = Var(1, nhidden);
            W_b = Var(1, 1);

            y_before_transfer = (product(W, hl) + W_b); //+product(V_direct , temp_S)) ;
            y = sigmoid(y_before_transfer);

            params.push_back(W);

            penalties.append(affine_transform_weight_penalty(V, weight_decay, 0, "L1"));

            params.push_back(W_b);

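            // Putting the pieces together, for the molecule selected by
            // input_index x the model computes:
            //
            //     s_i = S_i(x)                             (template scores)
            //     z_i = (s_i - mu_S_i) / sigma_square_S_i  (standardization)
            //     h   = tanh(V z + V_b)                    (hidden layer)
            //     y   = sigmoid(W h + W_b)                 (P(active))
            //
            // The NoBpropVariable wrappers only rescale gradients and leave
            // the forward values unchanged.
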
            // initialize all the parameters
            if (training_mode) {

                paramsvalues.resize(params.nelems());

                for (int i=0; i<n_templates; ++i) {
                    mu_S[i]->value.fill(0);
                    sigma_S[i]->value.fill(1);
                }

                Vec t_mean(n_templates), t_std(n_templates);
                compute_S_mean_std(t_mean, t_std);

                for (int i=0; i<n_templates; ++i) {
                    mu_S[i]->value[0] = t_mean[i];
                    sigma_S[i]->value[0] = sqrt(t_std[i]);
                }

                for (int i=0; i<n_templates; ++i) {
                    S_std[i] = lrate2;
                }

                manual_seed(seed_);

                fill_random_uniform(V->matValue, -1, 1);
                fill_random_uniform(V_b->matValue, -1, 1);
                //        fill_random_uniform(V_direct->matValue,-0.0001,0.0001) ;
                fill_random_uniform(W->matValue, -1, 1);
                fill_random_uniform(W_b->matValue, -1, 1);
            }
            else {
                params << paramsvalues;
            }

            params.makeSharedValue(paramsvalues);

            if (!training_mode) {

                // The saved parameters store sigma_S and the log-variances;
                // square / exponentiate them in place to recover
                // sigma_square_S and sigma_square (which alias them here).
                for (int i=0; i<n_templates; ++i) {
                    sigma_S[i]->value[0] *= sigma_S[i]->value[0];
                }

                for (int i=0; i<n_templates; ++i) {
                    for (int j=0; j<sigma_square[i]->matValue.length(); ++j) {
                        for (int k=0; k<sigma_square[i]->matValue.width(); ++k) {
                            sigma_square[i]->matValue[j][k] = exp(sigma[i]->matValue[j][k]);
                        }
                    }
                }
            }

/*
            for(int i=0 ; i<n_templates ; ++i) {
                sigma_s_vec[i] = sigma_S[i]->value[0] ;
            }
*/

            target = Var(1, "the target");

            costs.resize(3);
            costs[0] = stable_cross_entropy(y_before_transfer, target);
            costs[1] = binary_classification_loss(y, target);
            costs[2] = lift_output(y, target);

            f_output = Func(input_index, y);
//            displayVarFn(f_output , 0) ;

            training_cost = hconcat(sum(hconcat(costs[0] & penalties)));
            training_cost->setName("training cost");

            test_costs = hconcat(costs);
            test_costs->setName("testing cost");

            output_target_to_costs = Func(y & target, test_costs);
            test_costf = Func(input_index & target, y & test_costs);
        }
    }
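
    // Note: stable_cross_entropy is applied to the pre-sigmoid activation
    // y_before_transfer rather than to y itself; it evaluates
    //
    //     NLL = -( t*log(sigmoid(x)) + (1-t)*log(1 - sigmoid(x)) )
    //
    // in a numerically stable way (see NegCrossEntropySigmoidVariable).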

    // ### Nothing to add here, simply calls build_
    void MoleculeTemplateLearner::build()
    {
        inherited::build();
        build_();
    }

    void MoleculeTemplateLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
    {
        inherited::makeDeepCopyFromShallowCopy(copies);

        varDeepCopyField(input_index, copies);
        deepCopyField(mu, copies);
        deepCopyField(sigma, copies);
        deepCopyField(mu_S, copies);
        deepCopyField(sigma_S, copies);
        deepCopyField(sigma_square_S, copies);
        deepCopyField(sigma_square, copies);
        deepCopyField(S, copies);
        deepCopyField(S_after_scaling, copies);
        deepCopyField(params, copies);
        deepCopyField(penalties, copies);

        varDeepCopyField(V, copies);
        varDeepCopyField(W, copies);
        varDeepCopyField(V_b, copies);
        varDeepCopyField(W_b, copies);
        varDeepCopyField(V_direct, copies);
        varDeepCopyField(hl, copies);
        varDeepCopyField(y, copies);
        varDeepCopyField(y_before_transfer, copies);
        varDeepCopyField(training_cost, copies);
        varDeepCopyField(test_costs, copies);
        varDeepCopyField(target, copies);
        varDeepCopyField(temp_S, copies);

        deepCopyField(costs, copies);
        deepCopyField(temp_output, copies);
        deepCopyField(S_std, copies);
        deepCopyField(sigma_s_vec, copies);
        deepCopyField(f_output, copies);
        deepCopyField(output_target_to_costs, copies);
        deepCopyField(test_costf, copies);
        deepCopyField(optimizer, copies);
        deepCopyField(templates, copies);
        deepCopyField(paramsvalues, copies);
    }

    int MoleculeTemplateLearner::outputsize() const
    {
        // Return the size of this learner's output (as given by the
        // noutputs option).
        return noutputs;
    }

    void MoleculeTemplateLearner::forget()
    {
        initializeParams();
    }

    void MoleculeTemplateLearner::compute_S_mean_std(Vec& t_mean, Vec& t_std)
    {
        int l = train_set->length();

        Vec current_S(n_templates);
        Func computeS(input_index, temp_S);

        Mat valueS(l, n_templates);
        Vec training_row(2);
        Vec current_index(1);

        t_mean.fill(0);
        t_std.fill(0);

        computeS->recomputeParents();

        // debug dump of the raw scores, one line per training example
        FILE* f = fopen("nicolas.txt", "wt");

        for (int i=0; i<l; ++i) {

            train_set->getRow(i, training_row);
            current_index[0] = training_row[0];

            // point every score Var at the molecule of this row
            for (int j=0; j<n_templates; ++j) {
                PP<WeightedLogGaussian> ppp = dynamic_cast<WeightedLogGaussian*>((Variable*)S[j]);
                ppp->molecule = Molecules[(int)training_row[0]];
            }

            computeS->fprop(current_index, current_S);

            for (int j=0; j<n_templates; ++j) {
                valueS[i][j] = current_S[j];
                t_mean[j] += current_S[j];
                cout << i << " " << current_S[j] << endl; // debug trace
            }
            // (assumes at least two templates)
            fprintf(f, "%f %f %d\n", current_S[0], current_S[1], training_row[1] > 0 ? 1 : -1);
        }
        fclose(f);

        // average the accumulated scores
        for (int i=0; i<n_templates; ++i) {
            t_mean[i] /= l;
        }

        for (int i=0; i<l; ++i) {
            for (int j=0; j<n_templates; ++j) {
                t_std[j] += square(valueS[i][j] - t_mean[j]);
            }
        }

        for (int i=0; i<n_templates; ++i) {
            t_std[i] /= l;
            t_std[i] = sqrt(t_std[i]);
        }
    }
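
    // train() optimizes one example at a time: before each optimizer step
    // the Template objects are refreshed from the current mu/sigma_square
    // values and the score Vars are pointed at the molecule of the next
    // row, since each WeightedLogGaussian scores one molecule at a time.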
    void MoleculeTemplateLearner::train()
    {
        if (!train_stats)  // make a default stats collector, in case there's none
            train_stats = new VecStatsCollector();

        int l = train_set->length();
        int nsamples = 1;

        Func paramf = Func(input_index & target, training_cost); // parameterized function to optimize

        Var totalcost = meanOf(train_set, paramf, nsamples);
        if (optimizer)
        {
            optimizer->setToOptimize(params, totalcost);
            optimizer->build();
            optimizer->reset();
        }
        else
            PLERROR("MoleculeTemplateLearner::train can't train without setting an optimizer first!");

        ProgressBar* pb = 0;
        if (report_progress > 0) {
            pb = new ProgressBar("Training MoleculeTemplateLearner stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
        }

//        int optstage_per_lstage = l/nsamples;

        while (stage < nstages)
        {
            optimizer->nstages = 1; // optstage_per_lstage;
            double mean_error = 0.0;

            for (int k=0; k<train_set->length(); ++k) {

                // refresh the Template objects from the current parameter values
                for (int i=0; i<n_templates; ++i)
                {
                    templates[i]->chem << mu[i]->matValue;
                    templates[i]->dev << sigma_square[i]->matValue;
                }

                // align only the next training example
                Mat temp_mat;
                Vec training_row(2);
                train_set->getRow(k, training_row);
                for (int i=0; i<n_templates; ++i) {
//                    string s =  train_set->getString(k,0) ;
//                    performLP(Molecules[(int)training_row[0]],templates[i], temp_mat , false) ;
//                    W_lp[i][(int)training_row[0]]->matValue << temp_mat ;
                    PP<WeightedLogGaussian> ppp = dynamic_cast<WeightedLogGaussian*>((Variable*)S[i]);
                    ppp->molecule = Molecules[(int)training_row[0]];
                }

                // clear statistics of previous epoch
                train_stats->forget();

//                displayVarFn(f_output , true) ;

                optimizer->optimizeN(*train_stats);
//                temp_S->verifyGradient(1e-4) ;

                train_stats->finalize(); // finalize statistics for this epoch
                cout << "Example " << k << " train objective: " << train_stats->getMean() << endl;
                mean_error += train_stats->getMean()[0];

                if (pb)
                    pb->update(stage);
            }

            cout << endl << endl << "Epoch " << stage << " mean error " << mean_error/l << endl << endl;

            ++stage;
        }

        if (pb)
            delete pb;

/*
        Mat temp_mat ;
        for(int i=0 ; i<n_templates ; ++i) {
            W_lp[i].resize(Molecules.size()) ;
            for(unsigned int j=0 ; j<Molecules.size() ; ++j) {
                performLP(Molecules[j],templates[i], temp_mat , false) ;
                W_lp[i][j]->matValue << temp_mat ;
            }
        }
*/
        for (int i=0; i<n_templates; ++i) {
            cout << "mu[" << i << "] " << mu[i]->matValue << endl;
            cout << "sigma_square[" << i << "] " << sigma_square[i]->matValue << endl;
        }

        output_target_to_costs->recomputeParents();
        test_costf->recomputeParents();

//        molecule = NULL ;
    }

    void MoleculeTemplateLearner::computeOutput(const Vec& input, Vec& output) const
    {
        output.resize(1);
        f_output->fprop(input, output);
    }

    void MoleculeTemplateLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
            const Vec& target, Vec& costsv) const
    {
        PLERROR("computeCostsFromOutputs is not supported by MoleculeTemplateLearner; use computeOutputAndCosts instead");
        // Compute the costs from the *already* computed output (unreachable).
        output_target_to_costs->fprop(output & target, costsv);
    }

    void MoleculeTemplateLearner::computeOutputAndCosts(const Vec& inputv, const Vec& targetv,
            Vec& outputv, Vec& costsv) const
    {
        test_costf->fprop(inputv & targetv, outputv & costsv);
    }

    TVec<string> MoleculeTemplateLearner::getTestCostNames() const
    {
        // Return the names of the costs computed by computeCostsFromOutputs
        // (these may or may not be exactly the same as what's returned by
        // getTrainCostNames).
        TVec<string> t(3);
        t[0] = "NLL";
        t[1] = "binary_class_error";
        t[2] = "lift_output";
        return t;
    }

    TVec<string> MoleculeTemplateLearner::getTrainCostNames() const
    {
        TVec<string> t(3);
        t[0] = "NLL";
        t[1] = "binary_class_error";
        t[2] = "lift_output";
        return t;
    }

    void MoleculeTemplateLearner::initializeParams()
    {
        // Parameter initialization is handled directly in build_().
    }

    void MoleculeTemplateLearner::test(VMat testset, PP<VecStatsCollector> test_stats,
                      VMat testoutputs, VMat testcosts) const
    {
        // give every score Var access to the test set, presumably so the
        // scores can be computed for molecules outside the training set
        for (int i=0; i<n_templates; ++i) {
            PP<WeightedLogGaussian> ppp = dynamic_cast<WeightedLogGaussian*>((Variable*)S[i]);
            ppp->test_set = testset;
        }

        inherited::test(testset, test_stats, testoutputs, testcosts);
    }


} // end of namespace PLearn