AdaBoost.cc
00001 // -*- C++ -*-
00002 
00003 // AdaBoost.cc
00004 //
00005 // Copyright (C) 2003  Pascal Vincent 
00006 // 
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 // 
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 // 
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 // 
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 // 
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 // 
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 /* *******************************************************      
00036  * $Id: AdaBoost.cc 10265 2009-07-13 14:05:08Z nouiz $
00037  ******************************************************* */
00038 
00039 // Authors: Yoshua Bengio
00040 
00043 #include "AdaBoost.h"
00044 #include <plearn/math/pl_math.h>
00045 #include <plearn/vmat/ConcatColumnsVMatrix.h>
00046 #include <plearn/vmat/SelectRowsVMatrix.h>
00047 #include <plearn/vmat/MemoryVMatrix.h>
00048 #include <plearn/math/random.h>
00049 #include <plearn/io/load_and_save.h>
00050 #include <plearn/base/stringutils.h>
00051 #include <plearn_learners/regressors/RegressionTreeRegisters.h>
00052 #define PL_LOG_MODULE_NAME "AdaBoost"
00053 #include <plearn/io/pl_log.h>
00054 
00055 namespace PLearn {
00056 using namespace std;
00057 
00058 AdaBoost::AdaBoost()
00059     : sum_voting_weights(0.0), 
00060       initial_sum_weights(0.0),
00061       found_zero_error_weak_learner(0),
00062       target_error(0.5), 
00063       provide_learner_expdir(false),
00064       output_threshold(0.5), 
00065       compute_training_error(1), 
00066       pseudo_loss_adaboost(1), 
00067       conf_rated_adaboost(0), 
00068       weight_by_resampling(1), 
00069       early_stopping(1),
00070       save_often(0),
00071       forward_sub_learner_test_costs(false),
00072       modif_train_set_weights(false),
00073       reuse_test_results(false)
00074 { }
00075 
00076 PLEARN_IMPLEMENT_OBJECT(
00077     AdaBoost,
00078     "AdaBoost boosting algorithm for TWO-CLASS classification",
00079     "Given a classification weak-learner, this algorithm \"boosts\" it in\n"
00080     "order to obtain a much more powerful classification algorithm.\n"
00081     "The classifier is two-class, returning 0 or 1, or a number between 0 and 1.\n"
00082     "In the latter case, the user can use two different versions of AdaBoost:\n"
00083     " - \"Pseudo-loss\" AdaBoost:    see \"Experiments with a New Boosting \n"
00084     "                                  Algorithm\" by Freund and Schapire.\n"
00085     "                                  Set the 'pseudo_loss_adaboost' option\n"
00086     "                                  to select this version\n"
00087     "\n"
00088     " - \"Confidence-rated\" AdaBoost: see \"Improved Boosting Algorithms Using\n"
00089     "                                Confidence-rated Predictions\" by\n"
00090     "                                Schapire and Singer.\n"
00091     "                                Set the 'conf_rated_adaboost' option\n"
00092     "                                to select this version.\n"
00093     "These versions compute a more precise notion of error, taking into \n"
00094     "account the precise value outputted by the soft classifier.\n"
00095     "Also, \"Confidence-rated\" AdaBoost uses a line search at each stage to\n"
00096     "compute the weight of the trained weak learner.\n\n"
00097     "It should be noted that, except for the optimization of the weak learners,\n"
00098     "\"Confidence-rated\" AdaBoost is equivalent to MarginBoost (see \n"
00099     "\"Functional Gradient Techniques for Combining Hypotheses\" by \n"
00100     "Mason et al.) when using the exponential loss on the margin. Hence, the\n"
00101     "'conf_rated_adaboost' option can be used in that case too, and all that\n"
00102     "needs to be adjusted is the choice of weak learners.\n\n"
00103     "The nstages option from PLearner is used to specify the desired\n"
00104     "number of boosting rounds (but the algorithm can stop earlier if\n"
00105     "the next weak learner is unable to make significant progress or if\n"
00106     "the weak learner has 0 error on the training set).\n");
00107 
00108 void AdaBoost::declareOptions(OptionList& ol)
00109 {
00110     declareOption(ol, "weak_learners", &AdaBoost::weak_learners,
00111                   OptionBase::learntoption,
00112                   "The vector of learned weak learners");
00113 
00114     declareOption(ol, "voting_weights", &AdaBoost::voting_weights,
00115                   OptionBase::learntoption,
00116                   "Weights given to the weak learners (their output is\n"
00117                   "linearly combined with these weights\n"
00118                   "to form the output of the AdaBoost learner).\n");
00119 
00120     declareOption(ol, "sum_voting_weights", &AdaBoost::sum_voting_weights,
00121                   OptionBase::learntoption,
00122                   "Sum of the weak learners voting weights.\n");
00123   
00124     declareOption(ol, "initial_sum_weights", &AdaBoost::initial_sum_weights,
00125                   OptionBase::learntoption,
00126                   "Initial sum of weights on the examples. Do not tamper with it.\n");
00127 
00128     declareOption(ol, "example_weights", &AdaBoost::example_weights,
00129                   OptionBase::learntoption,
00130                   "The current weights of the examples.\n");
00131 
00132     declareOption(ol, "learners_error", &AdaBoost::learners_error,
00133                   OptionBase::learntoption,
00134                   "The error of each weak learner.\n");
00135 
00136     declareOption(ol, "weak_learner_template", &AdaBoost::weak_learner_template,
00137                   OptionBase::buildoption,
00138                   "Template for the regression weak learner to be "
00139                   "boosted into a classifier.");
00140 
00141     declareOption(ol, "target_error", &AdaBoost::target_error,
00142                   OptionBase::buildoption,
00143                   "This is the target average weighted error below which each"
00144                   " weak learner\n"
00145                   "must fall after its training (ordinary AdaBoost:"
00146                   " target_error=0.5).");
00147 
00148     declareOption(ol, "pseudo_loss_adaboost", &AdaBoost::pseudo_loss_adaboost,
00149                   OptionBase::buildoption,
00150                   "Whether to use Pseudo-loss Adaboost (see \"Experiments with\n"
00151                   "a New Boosting Algorithm\" by Freund and Schapire), which\n"
00152                   "takes into account the precise value outputted by\n"
00153                   "the soft classifier.");
00154 
00155     declareOption(ol, "conf_rated_adaboost", &AdaBoost::conf_rated_adaboost,
00156                   OptionBase::buildoption,
00157                   "Whether to use Confidence-rated AdaBoost (see \"Improved\n"
00158                   "Boosting Algorithms Using Confidence-rated Predictions\" by\n"
00159                   "Schapire and Singer) which takes into account the precise\n"
00160                   "value outputted by the soft classifier. It also searches\n"
00161                   "for the weight of a weak learner using a line search according\n"
00162                   "to a criterion that is more appropriate for soft classifiers.\n"
00163                   "This option can also be used to obtain MarginBoost with the\n"
00164                   "exponential loss, provided that an appropriate choice of\n"
00165                   "weak learner is made by the user (see \"Functional Gradient\n"
00166                   "Techniques for Combining Hypotheses\" by Mason et al.).\n");
00167 
00168     declareOption(ol, "weight_by_resampling", &AdaBoost::weight_by_resampling,
00169                   OptionBase::buildoption,
00170                   "Whether to train the weak learner using resampling"
00171                   " to represent the weighting\n"
00172                   "given to examples. If false then give these weights "
00173                   "explicitly in the training set\n"
00174                   "of the weak learner (note that some learners can accommodate "
00175                   "weights well, others not).\n");
00176 
00177     declareOption(ol, "output_threshold", &AdaBoost::output_threshold,
00178                   OptionBase::buildoption,
00179                   "To interpret the output of the learner as a class, it is "
00180                   "compared to this\n"
00181                   "threshold: class 1 if greater than output_threshold, class "
00182                   "0 otherwise.\n");
00183 
00184     declareOption(ol, "provide_learner_expdir", &AdaBoost::provide_learner_expdir,
00185                   OptionBase::buildoption,
00186                   "If true, each weak learner to be trained will have its\n"
00187                   "experiment directory set to WeakLearner#kExpdir/");
00188 
00189     declareOption(ol, "early_stopping", &AdaBoost::early_stopping, 
00190                   OptionBase::buildoption,
00191                   "If true, then boosting stops when the next weak learner\n"
00192                   "is too weak (average weighted error >= target_error)\n");
00193 
00194     declareOption(ol, "save_often", &AdaBoost::save_often, 
00195                   OptionBase::buildoption,
00196                   "If true, then save the model after training each weak\n"
00197                   "learner, under <expdir>/model.psave\n");
00198 
00199     declareOption(ol, "compute_training_error", 
00200                   &AdaBoost::compute_training_error, OptionBase::buildoption,
00201                   "Whether to compute training error at each stage.\n");
00202 
00203     declareOption(ol, "forward_sub_learner_test_costs", 
00204                   &AdaBoost::forward_sub_learner_test_costs, OptionBase::buildoption,
00205                   "Whether to append the sub-learner test costs to our costs.\n");
00206 
00207     declareOption(ol, "modif_train_set_weights", 
00208                   &AdaBoost::modif_train_set_weights, OptionBase::buildoption,
00209                   "Whether we directly modify the train_set weights.\n");
00210 
00211     declareOption(ol, "found_zero_error_weak_learner", 
00212                   &AdaBoost::found_zero_error_weak_learner, 
00213                   OptionBase::learntoption,
00214                   "Indication that a weak learner with 0 training error "
00215                   "has been found.\n");
00216 
00217     declareOption(ol, "weak_learner_output",
00218                   &AdaBoost::weak_learner_output,
00219                   OptionBase::nosave,
00220                   "A temporary vector that contains the weak learner output.\n");
00221 
00222     declareOption(ol, "reuse_test_results",
00223                   &AdaBoost::reuse_test_results,
00224                   OptionBase::buildoption,
00225                   "If true, we save and reuse the results of previous calls to test()."
00226                   " This is useful to obtain a test time that is independent of the"
00227                   " number of AdaBoost iterations.\n");
00228 
00229      declareOption(ol, "saved_testset",
00230                   &AdaBoost::saved_testset,
00231                   OptionBase::nosave,
00232                   "Used with reuse_test_results\n");
00233 
00234      declareOption(ol, "saved_testoutputs",
00235                   &AdaBoost::saved_testoutputs,
00236                   OptionBase::nosave,
00237                   "Used with reuse_test_results\n");
00238 
00239      declareOption(ol, "saved_last_test_stages",
00240                   &AdaBoost::saved_last_test_stages,
00241                   OptionBase::nosave,
00242                   "Used with reuse_test_results\n");
00243 
00244    // Now call the parent class' declareOptions
00245     inherited::declareOptions(ol);
00246 
00247     declareOption(ol, "train_set",
00248                   &AdaBoost::train_set,
00249                   OptionBase::learntoption|OptionBase::nosave,
00250                   "The training set, so we can reload it.\n");
00251 
00252 }
00253 
00254 void AdaBoost::build_()
00255 {
00256     if(conf_rated_adaboost && pseudo_loss_adaboost)
00257         PLERROR("In Adaboost:build_(): conf_rated_adaboost and pseudo_loss_adaboost cannot both be true, a choice must be made");
00258 
00259     
00260     int n = 0;
00261 // Why don't we always use weak_learner_template?
00262     if(weak_learners.size()>0)
00263         n=weak_learners[0]->outputsize();
00264     else if(weak_learner_template)
00265         n=weak_learner_template->outputsize();
00266     weak_learner_output.resize(n);
00267     
00268     //for RegressionTreeNode
00269     if(getTrainingSet())
00270         setTrainingSet(getTrainingSet(),false);
00271 }
00272 
00274 // build //
00276 void AdaBoost::build()
00277 {
00278     inherited::build();
00279     build_();
00280 }
00281 
00283 // makeDeepCopyFromShallowCopy //
00285 void AdaBoost::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00286 {
00287     inherited::makeDeepCopyFromShallowCopy(copies);
00288 
00289     deepCopyField(weighted_costs,           copies);
00290     deepCopyField(sum_weighted_costs,       copies);
00291     deepCopyField(saved_testset,            copies);
00292     deepCopyField(saved_testoutputs,        copies);
00293     deepCopyField(saved_last_test_stages,   copies);
00294 
00295     deepCopyField(learners_error,           copies);
00296     deepCopyField(example_weights,          copies);
00297     deepCopyField(weak_learner_output,      copies);
00298     deepCopyField(voting_weights,           copies);
00299     deepCopyField(weak_learners,            copies);
00300     deepCopyField(weak_learner_template,    copies);
00301 }
00302 
00304 // outputsize //
00306 int AdaBoost::outputsize() const
00307 {
00308     // Outputsize is normally 1 (the 0-1 class score); when reuse_test_results
00309     // is true we also append the unnormalized weighted sum, giving 2.
00310     if(reuse_test_results)
00311         return 2;
00312     else 
00313         return 1;
00314 }
00315 
00316 void AdaBoost::finalize()
00317 {
00318     inherited::finalize();
00319     for(int i=0;i<weak_learners.size();i++){
00320         weak_learners[i]->finalize();
00321     }
00322     if(train_set && train_set->classname()=="RegressionTreeRegisters")
00323         ((PP<RegressionTreeRegisters>)train_set)->finalize();
00324 }
00325 
00326 void AdaBoost::forget()
00327 {
00328     stage = 0;
00329     learners_error.resize(0, nstages);
00330     weak_learners.resize(0, nstages);
00331     voting_weights.resize(0, nstages);
00332     sum_voting_weights = 0;
00333     found_zero_error_weak_learner=false;
00334     if (seed_ >= 0)
00335         manual_seed(seed_);
00336     else
00337         PLearn::seed();
00338 }
00339 
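// A rough sketch of the boosting loop implemented in train() below, for the
// standard discrete case (neither pseudo_loss_adaboost nor conf_rated_adaboost):
//
//   initialize example_weights[i] from the train set weights (uniform if none),
//       normalized so that they sum to 1
//   for stage = 0 .. nstages-1:
//       train a fresh deep copy of weak_learner_template on the reweighted data
//       err   = sum_i example_weights[i] * 1{weak learner misclassifies example i}
//       alpha = 0.5 * log( ((1-err) * target_error) / (err * (1-target_error)) )
//       example_weights[i] *= exp( -alpha * (1 - examples_error[i]) ), renormalize
//   the final vote is sum_t alpha_t * h_t(x) / sum_t alpha_t (see computeOutput_)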
00340 void AdaBoost::train()
00341 {
00342 
00343     if(nstages==stage)
00344         return;
00345     else if (nstages < stage){        
00346         PLCHECK(nstages>0); // should use forget
00347         NORMAL_LOG<<"In AdaBoost::train() - reverting from stage "<<stage
00348                   <<" to stage "<<nstages<<endl;
00349         stage = nstages;
00350         PLCHECK(learners_error.size()>=stage);
00351         PLCHECK(weak_learners.size()>=stage);
00352         PLCHECK(voting_weights.size()>=stage);
00353         PLCHECK(nstages>0);
00354         learners_error.resize(stage);
00355         weak_learners.resize(stage);
00356         voting_weights.resize(stage);
00357         sum_voting_weights = sum(voting_weights);
00358         found_zero_error_weak_learner=false;
00359 
00360         example_weights.resize(0);
00361         return;
00362         //need example_weights
00363         //computeTrainingError();
00364 
00365     }else if(nstages>0 && stage>0 && example_weights.size()==0){
00366         PLERROR("In AdaBoost::train() -  we can't retrain a reverted learner...");
00367     }
00368     
00369     if(found_zero_error_weak_learner) // Training is over...
00370         return;
00371 
00372     Profiler::pl_profile_start("AdaBoost::train");
00373 
00374     if(!train_set)
00375         PLERROR("In AdaBoost::train, you did not setTrainingSet");
00376     
00377     if(!train_stats && compute_training_error)
00378         PLERROR("In AdaBoost::train, you did not setTrainStatsCollector");
00379 
00380     if (train_set->targetsize()!=1)
00381         PLERROR("In AdaBoost::train, targetsize should be 1, found %d", 
00382                 train_set->targetsize());
00383 
00384     if(modif_train_set_weights && train_set->weightsize()!=1)
00385         PLERROR("In AdaBoost::train, when modif_train_set_weights is true"
00386                 " the weightsize of the trainset must be one.");
00387     
00388     PLCHECK_MSG(train_set->inputsize()>0, "In AdaBoost::train, the inputsize"
00389                 " of the train_set must be known.");
00390 
00391 
00392     Vec input;
00393     Vec output;
00394     Vec target;
00395     real weight;
00396 
00397     Vec examples_error;
00398 
00399     const int n = train_set.length();
00400     TVec<int> train_indices;
00401     Vec pseudo_loss;
00402 
00403     input.resize(inputsize());
00404     output.resize(weak_learner_template->outputsize());// We use only the first one as the output from the weak learner
00405     target.resize(targetsize());
00406     examples_error.resize(n);
00407 
00408     if (stage==0)
00409     {
00410         example_weights.resize(n);
00411         if (train_set->weightsize()>0)
00412         {
00413             PP<ProgressBar> pb;
00414             initial_sum_weights=0;
00415             int weight_col = train_set->inputsize()+train_set->targetsize();
00416             for (int i=0; i<n; ++i) {
00417                 weight=train_set->get(i,weight_col);
00418                 example_weights[i]=weight;
00419                 initial_sum_weights += weight;
00420             }
00421             example_weights *= real(1.0)/initial_sum_weights;
00422         }
00423         else 
00424         {
00425             example_weights.fill(1.0/n);
00426             initial_sum_weights = 1;
00427         }
00428         sum_voting_weights = 0;
00429         voting_weights.resize(0,nstages);
00430 
00431     } else
00432         PLCHECK_MSG(example_weights.length()==n,"In AdaBoost::train - the train"
00433                     " set should not change between each train without a forget!");
00434 
00435     VMat unweighted_data = train_set.subMatColumns(0, inputsize()+1);
00436     learners_error.resize(nstages);
00437 
00438     for ( ; stage < nstages ; ++stage)
00439     {
00440         VMat weak_learner_training_set;
00441         { 
00442             // We shall now construct a training set for the new weak learner:
00443             if (weight_by_resampling)
00444             {
00445                 PP<ProgressBar> pb;
00446                 if(report_progress) pb = new ProgressBar(
00447                     "AdaBoost round " + tostring(stage) +
00448                     ": making training set for weak learner", n);
00449 
00450                 // use a "smart" resampling that approximates sampling 
00451                 // with replacement with the probabilities given by 
00452                 // example_weights.
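                // Each example i would ideally be repeated about Binomial(n, p_i)
                // times; below this is approximated by rounding a Gaussian draw with
                // mean n*p_i and standard deviation sqrt(n*p_i*(1-p_i)), which matches
                // the binomial's first two moments.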
00453                 map<real,int> indices;
00454                 for (int i=0; i<n; ++i) {
00455                     if(report_progress) pb->update(i);
00456                     real p_i = example_weights[i];
00457                     // randomly choose how many repeats of example i
00458                     int n_samples_of_row_i = 
00459                         int(rint(gaussian_mu_sigma(n*p_i,sqrt(n*p_i*(1-p_i))))); 
00460                     for (int j=0;j<n_samples_of_row_i;j++)
00461                     {
00462                         if (j==0)
00463                             indices[i]=i;
00464                         else
00465                         {
00466                             // put the others in random places
00467                             real k=n*uniform_sample(); 
00468                             // while avoiding collisions
00469                             indices[k]=i; 
00470                         }
00471                     }
00472                 }
00473                 train_indices.resize(0,n);
00474                 map<real,int>::iterator it = indices.begin();
00475                 map<real,int>::iterator last = indices.end();
00476                 for (;it!=last;++it)
00477                     train_indices.push_back(it->second);
00478                 weak_learner_training_set = 
00479                     new SelectRowsVMatrix(unweighted_data, train_indices);
00480                 weak_learner_training_set->defineSizes(inputsize(), 1, 0);
00481             }
00482             else if(modif_train_set_weights)
00483             {
00484                 // No need for a deep copy of the train_set: it is not used again after
00485                 // training and its data are not modified, but we do need to update its weights.
00486                 weak_learner_training_set = train_set;
00487                 int weight_col=train_set->inputsize()+train_set->targetsize();
00488                 for(int i=0;i<train_set->length();i++)
00489                     train_set->put(i,weight_col,example_weights[i]);
00490             }
00491             else
00492             {
00493                 Mat data_weights_column = example_weights.toMat(n,1).copy();
00494                 // to bring the weights to the same average level as 
00495                 // the original ones
00496                 data_weights_column *= initial_sum_weights; 
00497                 VMat data_weights = VMat(data_weights_column);
00498                 weak_learner_training_set = 
00499                     new ConcatColumnsVMatrix(unweighted_data,data_weights);
00500                 weak_learner_training_set->defineSizes(inputsize(), 1, 1);
00501             }
00502         }
00503 
00504         // Create new weak-learner and train it
00505         PP<PLearner> new_weak_learner = ::PLearn::deepCopy(weak_learner_template);
00506         new_weak_learner->setTrainingSet(weak_learner_training_set);
00507         new_weak_learner->setTrainStatsCollector(new VecStatsCollector);
00508         if(expdir!="" && provide_learner_expdir)
00509             new_weak_learner->setExperimentDirectory( expdir / ("WeakLearner"+tostring(stage)+"Expdir") );
00510 
00511         new_weak_learner->train();
00512         new_weak_learner->finalize();
00513 
00514         // calculate its weighted training error 
00515         {
00516             PP<ProgressBar> pb;
00517             if(report_progress && verbosity >1) pb = new ProgressBar("computing weighted training error of weak learner",n);
00518             learners_error[stage] = 0;
00519             for (int i=0; i<n; ++i) {
00520                 if(pb) pb->update(i);
00521                 train_set->getExample(i, input, target, weight);
00522 #ifdef BOUNDCHECK
00523                 if(!(is_equal(target[0],0)||is_equal(target[0],1)))
00524                     PLERROR("In AdaBoost::train() - target is %f in the training set. It should be 0 or 1 as we implement only two class boosting.",target[0]);
00525 #endif
00526                 new_weak_learner->computeOutput(input,output);
00527                 real y_i=target[0];
00528                 real f_i=output[0];
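                // For both "soft" variants below, since y_i is 0 or 1 we have
                // 2*(f_i + y_i - 2*f_i*y_i) == 2*|f_i - y_i|, so examples_error[i]
                // lies in [0,2] and half of it, weighted, is added to the
                // learner's error.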
00529                 if(conf_rated_adaboost)
00530                 {
00531                     PLASSERT_MSG(f_i>=0,"In AdaBoost.cc::train() - output[0] should be >= 0 ");
00532                     // an error between 0 and 1 (before weighting)
00533                     examples_error[i] = 2*(f_i+y_i-2*f_i*y_i);
00534                     learners_error[stage] += example_weights[i]*
00535                         examples_error[i]/2;
00536                 }
00537                 else
00538                 {
00539                     // an error between 0 and 1 (before weighting)
00540                     if (pseudo_loss_adaboost) 
00541                     {
00542                         PLASSERT_MSG(f_i>=0,"In AdaBoost.cc::train() - output[0] should be >= 0 ");
00543                         examples_error[i] = 2*(f_i+y_i-2*f_i*y_i);
00544                         learners_error[stage] += example_weights[i]*
00545                             examples_error[i]/2;
00546                     }
00547                     else
00548                     {
00549                         if (fast_exact_is_equal(y_i, 1))
00550                         {
00551                             if (f_i<output_threshold)
00552                             {
00553                                 learners_error[stage] += example_weights[i];
00554                                 examples_error[i]=2;
00555                             }
00556                             else examples_error[i] = 0;
00557                         }
00558                         else
00559                         {
00560                             if (f_i>=output_threshold) {
00561                                 learners_error[stage] += example_weights[i];
00562                                 examples_error[i]=2;
00563                             }
00564                             else examples_error[i]=0;
00565                         }
00566                     }
00567                 }
00568             }
00569         }
00570 
00571         if (verbosity>1)
00572             NORMAL_LOG << "weak learner at stage " << stage 
00573                        << " has average loss = " << learners_error[stage] << endl;
00574 
00575         weak_learners.push_back(new_weak_learner);
00576 
00577         if (save_often && expdir!="")
00578             PLearn::save(append_slash(expdir)+"model.psave", *this);
00579       
00580         // compute the new learner's weight
00581         if(conf_rated_adaboost)
00582         {
00583             // Find optimal weight with line search
00584       
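            // The search below minimizes (with y_i and f_i mapped from {0,1}
            // to {-1,+1}) the exponential loss of the margin:
            //   F(alpha) = sum_i example_weights[i] * exp( -alpha * y_i * f_i )
            // as in confidence-rated AdaBoost / MarginBoost.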
00585             real ax = -10;
00586             real bx = 1;
00587             real cx = 100;
00588             real xmin;
00589             real tolerance = 0.001;
00590             int itmax = 100000;
00591 
00592             int iter;
00593             real xtmp;
00594             real fa, fb, fc, ftmp;
00595 
00596             // compute the objective values fa, fb and fc
00597 
00598             fa = 0;
00599             fb = 0;
00600             fc = 0;
00601 
00602             for (int i=0; i<n; ++i) {
00603                 train_set->getExample(i, input, target, weight);
00604                 new_weak_learner->computeOutput(input,output);
00605                 real y_i=(2*target[0]-1);
00606                 real f_i=(2*output[0]-1);
00607                 fa += example_weights[i]*exp(-1*ax*f_i*y_i);
00608                 fb += example_weights[i]*exp(-1*bx*f_i*y_i);
00609                 fc += example_weights[i]*exp(-1*cx*f_i*y_i);
00610             }
00611 
00612         
00613             for(iter=1;iter<=itmax;iter++)
00614             {
00615                 if(verbosity>4)
00616                     NORMAL_LOG << "iteration " << iter << ": fx = " << fb << endl;
00617                 if (abs(cx-ax) <= tolerance)
00618                 {
00619                     xmin=bx;
00620                     if(verbosity>3)
00621                     {
00622                         NORMAL_LOG << "nIters for minimum: " << iter << endl;
00623                         NORMAL_LOG << "xmin = " << xmin << endl;
00624                         NORMAL_LOG << "fx = " << fb << endl;
00625                     }
00626                     break;
00627                 }
00628                 if (abs(bx-ax) > abs(bx-cx)) 
00629                 {
00630                     xtmp = (bx + ax) * 0.5;
00631 
00632                     ftmp = 0;
00633                     for (int i=0; i<n; ++i) {
00634                         train_set->getExample(i, input, target, weight);
00635                         new_weak_learner->computeOutput(input,output);
00636                         real y_i=(2*target[0]-1);
00637                         real f_i=(2*output[0]-1);
00638                         ftmp += example_weights[i]*exp(-1*xtmp*f_i*y_i);
00639                     }
00640 
00641                     if (ftmp > fb)
00642                     {
00643                         ax = xtmp;
00644                         fa = ftmp;
00645                     }
00646                     else
00647                     {
00648                         cx = bx;
00649                         fc = fb;
00650                         bx = xtmp;
00651                         fb = ftmp;
00652                     }
00653                 }
00654                 else
00655                 {
00656                     xtmp = (bx + cx) * 0.5;
00657                     ftmp = 0;
00658                     for (int i=0; i<n; ++i) {
00659                         train_set->getExample(i, input, target, weight);
00660                         new_weak_learner->computeOutput(input,output);
00661                         real y_i=(2*target[0]-1);
00662                         real f_i=(2*output[0]-1);
00663                         ftmp += example_weights[i]*exp(-1*xtmp*f_i*y_i);
00664                     }
00665 
00666                     if (ftmp > fb)
00667                     {
00668                         cx = xtmp;
00669                         fc = ftmp;
00670                     }
00671                     else
00672                     {
00673                         ax = bx;
00674                         fa = fb;
00675                         bx = xtmp;
00676                         fb = ftmp;
00677                     }
00678                 }
00679             }
00680             if(verbosity>3)
00681             {
00682                 NORMAL_LOG << "Too many iterations in Brent" << endl;
00683             }
00684             xmin=bx;
00685             voting_weights.push_back(xmin);
00686             sum_voting_weights += abs(voting_weights[stage]);
00687         }
00688         else
00689         {
00690             voting_weights.push_back(
00691                 0.5*safeflog(((1-learners_error[stage])*target_error)
00692                              /(learners_error[stage]*(1-target_error))));
00693             sum_voting_weights += abs(voting_weights[stage]);
00694         }
00695 
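        // In the discrete case above, alpha reduces to the classical AdaBoost
        // weight 0.5*log((1-err)/err) when target_error is 0.5. The loop below
        // then reweights the examples as w_i *= exp(-alpha*(1-e_i)), where e_i
        // is 0 for a correct prediction and 2 for an error, and renormalizes
        // so the weights sum to 1.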
00696         real sum_w=0;
00697         for (int i=0;i<n;i++)
00698         {
00699             example_weights[i] *= exp(-voting_weights[stage]*
00700                                       (1-examples_error[i]));
00701             sum_w += example_weights[i];
00702         }
00703         example_weights *= real(1.0)/sum_w;
00704 
00705         computeTrainingError(input, target);
00706 
00707         if(fast_exact_is_equal(learners_error[stage], 0))
00708         {
00709             NORMAL_LOG << "AdaBoost::train found a weak learner with 0 training "
00710                        << "error at stage " 
00711                        << stage << endl;  
00712 
00713             // Simulate infinite weight on new_weak_learner
00714             weak_learners.resize(0);
00715             weak_learners.push_back(new_weak_learner);
00716             voting_weights.resize(0);
00717             voting_weights.push_back(1);
00718             sum_voting_weights = 1;
00719             found_zero_error_weak_learner = true;
00720             stage++;
00721             break;
00722         }
00723 
00724         // stopping criterion (in addition to n_stages)
00725         if (early_stopping && learners_error[stage] >= target_error)
00726         {
00727             nstages = stage;
00728             NORMAL_LOG << 
00729                 "AdaBoost::train early stopping because learner's loss at stage " 
00730                  << stage << " is " << learners_error[stage] << endl;       
00731             break;
00732         }
00733 
00734 
00735     }
00736     PLCHECK(stage==weak_learners.length() || found_zero_error_weak_learner);
00737     Profiler::pl_profile_end("AdaBoost::train");
00738 
00739 }
00740 
00741 void AdaBoost::test(VMat testset, PP<VecStatsCollector> test_stats,
00742                     VMat testoutputs, VMat testcosts) const
00743 {
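    // When reuse_test_results is true, each test set is cached together with
    // its outputs and the stage at which they were computed; later calls only
    // evaluate the weak learners added since then, resuming from the
    // unnormalized vote stored in output[1] (see the branches below).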
00744     if(!reuse_test_results){
00745         inherited::test(testset, test_stats, testoutputs, testcosts);
00746         return;
00747     }
00748     Profiler::pl_profile_start("AdaBoost::test()");
00749     int index=-1;
00750     for(int i=0;i<saved_testset.size();i++){
00751         if(saved_testset[i]==testset){
00752             index=i;
00753             break;
00754         }
00755     }
00756     if(index<0){
00757         //first time the testset is seen
00758         Profiler::pl_profile_start("AdaBoost::test() first" );
00759         inherited::test(testset, test_stats, testoutputs, testcosts);
00760         saved_testset.append(testset);
00761         saved_testoutputs.append(PLearn::deepCopy(testoutputs));
00762         PLCHECK(weak_learners.length()==stage || found_zero_error_weak_learner);
00763         cout << weak_learners.length()<<" "<<stage<<endl;
00764         saved_last_test_stages.append(stage);
00765         Profiler::pl_profile_end("AdaBoost::test() first" );
00766     }else if(found_zero_error_weak_learner && saved_last_test_stages.last()==stage){
00767         Vec input;
00768         Vec output(outputsize());
00769         Vec target;
00770         Vec costs(nTestCosts());
00771         real weight;
00772         VMat old_outputs=saved_testoutputs[index];
00773         PLCHECK(old_outputs->width()==testoutputs->width());
00774         PLCHECK(old_outputs->length()==testset->length());
00775         for(int row=0;row<testset.length();row++){
00776             output=old_outputs(row);
00777             testset.getExample(row, input, target, weight);
00778             computeCostsFromOutputs(input,output,target,costs);
00779             if(testoutputs)testoutputs->putOrAppendRow(row,output);
00780             if(testcosts)testcosts->putOrAppendRow(row,costs);
00781             if(test_stats)test_stats->update(costs,weight);
00782         }
00783     }else{
00784         Profiler::pl_profile_start("AdaBoost::test() seconds" );
00785         PLCHECK(weak_learners.size()>1);
00786         PLCHECK(stage>1);
00787         PLCHECK(weak_learner_output.size()==weak_learner_template->outputsize());
00788 
00789         PLCHECK(saved_testset.length()>index);
00790         PLCHECK(saved_testoutputs.length()>index);
00791         PLCHECK(saved_last_test_stages.length()>index);
00792 
00793         int stages_done = saved_last_test_stages[index];
00794         PLCHECK(weak_learners.size()>=stages_done);
00795          
00796         Vec input;
00797         Vec output(outputsize());
00798         Vec target;
00799         Vec costs(nTestCosts());
00800         real weight;
00801         VMat old_outputs=saved_testoutputs[index];
00802         PLCHECK(old_outputs->width()==testoutputs->width());
00803         PLCHECK(old_outputs->length()==testset->length());
00804 #ifndef NDEBUG
00805         Vec output2(outputsize());
00806         Vec costs2(nTestCosts());
00807 #endif
00808         for(int row=0;row<testset.length();row++){
00809             output=old_outputs(row);
00810             //compute the new testoutputs
00811             Profiler::pl_profile_start("AdaBoost::test() getExample" );
00812             testset.getExample(row, input, target, weight);
00813             Profiler::pl_profile_end("AdaBoost::test() getExample" );
00814             computeOutput_(input, output, stages_done, output[1]);
00815             computeCostsFromOutputs(input,output,target,costs);
00816 #ifndef NDEBUG
00817             computeOutputAndCosts(input,target, output2, costs2);
00818             PLCHECK(output==output2);
00819             PLCHECK(costs.isEqual(costs2,true));
00820 #endif
00821             if(testoutputs)testoutputs->putOrAppendRow(row,output);
00822             if(testcosts)testcosts->putOrAppendRow(row,costs);
00823             if(test_stats)test_stats->update(costs,weight);
00824         }
00825         saved_testoutputs[index]=PLearn::deepCopy(testoutputs);
00826         saved_last_test_stages[index]=stage;
00827         Profiler::pl_profile_end("AdaBoost::test() seconds" );
00828     }
00829     Profiler::pl_profile_end("AdaBoost::test()");
00830 }
00831 
00832 void AdaBoost::computeOutput_(const Vec& input, Vec& output,
00833                               const int start, real const sum) const
00834 {
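    // Accumulates the weak learners' votes starting from learner 'start' and
    // from the partial unnormalized sum 'sum', so that test() can resume a
    // previously computed vote; output[0] is the normalized vote
    // sum_t alpha_t*h_t(x) / sum_t alpha_t.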
00835     PLASSERT(weak_learners.size()>0);
00836     PLASSERT(weak_learner_output.size()==weak_learner_template->outputsize());
00837     PLASSERT(output.size()==outputsize());
00838     real sum_out=sum;
00839     if(!pseudo_loss_adaboost && !conf_rated_adaboost)
00840         for (int i=start;i<weak_learners.size();i++){
00841             weak_learners[i]->computeOutput(input,weak_learner_output);
00842             sum_out += (weak_learner_output[0] < output_threshold ? 0 : 1) 
00843                 *voting_weights[i];
00844         }
00845     else
00846         for (int i=start;i<weak_learners.size();i++){
00847             weak_learners[i]->computeOutput(input,weak_learner_output);
00848             sum_out += weak_learner_output[0]*voting_weights[i];
00849         }
00850 
00851     output[0] = sum_out/sum_voting_weights;
00852     if(reuse_test_results)
00853         output[1] = sum_out;
00854 }
00855 
00856 void AdaBoost::computeCostsFromOutputs(const Vec& input, const Vec& output, 
00857                                        const Vec& target, Vec& costs) const
00858 {
00859     // when computing train stats, costs.size()==nTrainCosts() 
00860     //  and forward_sub_learner_test_costs==false
00861     if(forward_sub_learner_test_costs)
00862         PLASSERT(costs.size()==nTestCosts());
00863     else
00864         PLASSERT(costs.size()==nTrainCosts()||costs.size()==nTestCosts());
00865     costs.resize(5);
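    // The five base costs match getTrainCostNames():
    //   costs[0] binary_class_error, costs[1] exp_neg_margin, costs[2] class_error,
    //   costs[3] avg_weight_class_0, costs[4] avg_weight_class_1.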
00866 
00867     // First cost is the binary classification error: output[0] (the score for
00868     // class 1) is compared to output_threshold to decide the predicted class.
00869 #ifdef BOUNDCHECK
00870     if (target.size() > 1)
00871         PLERROR("AdaBoost::computeCostsFromOutputs: target must contain "
00872                 "one element only: the 0/1 class");
00873 #endif
00874     if (fast_exact_is_equal(target[0], 0)) {
00875         costs[0] = output[0] >= output_threshold; 
00876     }
00877     else if (fast_exact_is_equal(target[0], 1)) {
00878         costs[0] = output[0] < output_threshold; 
00879     }
00880     else PLERROR("AdaBoost::computeCostsFromOutputs: target must be "
00881                  "either 0 or 1; current target=%f", target[0]);
00882     costs[1] = exp(-1.0*sum_voting_weights*(2*output[0]-1)*(2*target[0]-1));
00883     costs[2] = costs[0];
00884     if(train_stats){
00885         costs[3] = train_stats->getStat("E[avg_weight_class_0]");
00886         costs[4] = train_stats->getStat("E[avg_weight_class_1]");
00887     }
00888     else
00889         costs[3]=costs[4]=MISSING_VALUE;
00890 
00891     if(forward_sub_learner_test_costs){
00892         // slow, as we have already calculated the output;
00893         // we should have called computeOutputAndCosts instead.
00894         PLWARNING("AdaBoost::computeCostsFromOutputs called with forward_sub_learner_test_costs true. This should be optimized!");
00895         weighted_costs.resize(weak_learner_template->nTestCosts());
00896         sum_weighted_costs.resize(weak_learner_template->nTestCosts());
00897         sum_weighted_costs.clear();
00898         for(int i=0;i<weak_learners.size();i++){
00899             weak_learners[i]->computeCostsOnly(input, target, weighted_costs);
00900             weighted_costs*=voting_weights[i];
00901             sum_weighted_costs+=weighted_costs;
00902         }
00903         costs.append(sum_weighted_costs);
00904     }
00905 
00906     PLASSERT(costs.size()==nTrainCosts()||costs.size()==nTestCosts());
00907 }
00908 
00909 void AdaBoost::computeOutputAndCosts(const Vec& input, const Vec& target,
00910                                      Vec& output, Vec& costs) const
00911 {
00912     PLASSERT(weak_learners.size()>0);
00913     PLASSERT(weak_learner_output.size()==weak_learner_template->outputsize());
00914     PLASSERT(output.size()==outputsize());
00915     real sum_out=0;
00916     
00917     if(forward_sub_learner_test_costs){
00918         weighted_costs.resize(weak_learner_template->nTestCosts());
00919         sum_weighted_costs.resize(weak_learner_template->nTestCosts());
00920         sum_weighted_costs.clear();
00921         if(!pseudo_loss_adaboost && !conf_rated_adaboost){
00922             for (int i=0;i<weak_learners.size();i++){
00923                 weak_learners[i]->computeOutputAndCosts(input,target,
00924                                                         weak_learner_output,
00925                                                         weighted_costs);
00926                 sum_out += (weak_learner_output[0] < output_threshold ? 0 : 1) 
00927                     *voting_weights[i];
00928                 weighted_costs*=voting_weights[i];
00929                 sum_weighted_costs+=weighted_costs;
00930             }
00931         }else{
00932             for (int i=0;i<weak_learners.size();i++){
00933                 weak_learners[i]->computeOutputAndCosts(input,target,
00934                                                         weak_learner_output,
00935                                                         weighted_costs);
00936                 sum_out += weak_learner_output[0]*voting_weights[i];
00937                 weighted_costs*=voting_weights[i];
00938                 sum_weighted_costs+=weighted_costs;
00939             }
00940         }
00941     }else{
00942         if(!pseudo_loss_adaboost && !conf_rated_adaboost)
00943             for (int i=0;i<weak_learners.size();i++){
00944                 weak_learners[i]->computeOutput(input,weak_learner_output);
00945                 sum_out += (weak_learner_output[0] < output_threshold ? 0 : 1) 
00946                     *voting_weights[i];
00947             }
00948         else
00949             for (int i=0;i<weak_learners.size();i++){
00950                 weak_learners[i]->computeOutput(input,weak_learner_output);
00951                 sum_out += weak_learner_output[0]*voting_weights[i];
00952             }
00953     }
00954 
00955     output[0] = sum_out/sum_voting_weights;
00956     if(reuse_test_results)
00957         output[1] = sum_out;
00958 
00959     // when computing train stats, costs.size()==nTrainCosts() 
00960     //  and forward_sub_learner_test_costs==false
00961     if(forward_sub_learner_test_costs)
00962         PLASSERT(costs.size()==nTestCosts());
00963     else
00964         PLASSERT(costs.size()==nTrainCosts()||costs.size()==nTestCosts());
00965     costs.resize(5);
00966     costs.clear();
00967 
00968     // First cost is the binary classification error: output[0] (the score for
00969     // class 1) is compared to output_threshold to decide the predicted class.
00970     if (target.size() > 1)
00971         PLERROR("AdaBoost::computeCostsFromOutputs: target must contain "
00972                 "one element only: the 0/1 class");
00973     if (fast_exact_is_equal(target[0], 0)) {
00974         costs[0] = output[0] >= output_threshold; 
00975     }
00976     else if (fast_exact_is_equal(target[0], 1)) {
00977         costs[0] = output[0] < output_threshold; 
00978     }
00979     else PLERROR("AdaBoost::computeCostsFromOutputs: target must be "
00980                  "either 0 or 1; current target=%f", target[0]);
00981     costs[1] = exp(-1.0*sum_voting_weights*(2*output[0]-1)*(2*target[0]-1));
00982     costs[2] = costs[0];
00983     if(train_stats){
00984         costs[3] = train_stats->getStat("E[avg_weight_class_0]");
00985         costs[4] = train_stats->getStat("E[avg_weight_class_1]");
00986     }
00987     else
00988         costs[3]=costs[4]=MISSING_VALUE;
00989 
00990     if(forward_sub_learner_test_costs){
00991         costs.append(sum_weighted_costs);
00992     }
00993 
00994     PLASSERT(costs.size()==nTrainCosts()||costs.size()==nTestCosts());
00995 }
00996 
00997 
00998 TVec<string> AdaBoost::getTestCostNames() const
00999 {
01000     TVec<string> costs=getTrainCostNames();
01001 
01002     if(forward_sub_learner_test_costs){
01003         TVec<string> subcosts;
01004         // We try to find a weak_learner with a train set,
01005         // as a RegressionTree needs it to generate the test cost names.
01006         if(weak_learner_template->getTrainingSet())
01007             subcosts=weak_learner_template->getTestCostNames();
01008         else if(weak_learners.length()>0)
01009             subcosts=weak_learners[0]->getTestCostNames();
01010         else
01011             subcosts=weak_learner_template->getTestCostNames();
01012         for(int i=0;i<subcosts.length();i++){
01013             subcosts[i]="weighted_weak_learner."+subcosts[i];
01014         }
01015         costs.append(subcosts);
01016     }
01017     return costs;
01018 }
01019 
01020 TVec<string> AdaBoost::getTrainCostNames() const
01021 {
01022     TVec<string> costs(5);
01023     costs[0] = "binary_class_error";
01024     costs[1] = "exp_neg_margin";
01025     costs[2] = "class_error";
01026     costs[3] = "avg_weight_class_0";
01027     costs[4] = "avg_weight_class_1";
01028     return costs;
01029 }
01030 
01031 void AdaBoost::computeTrainingError(Vec input, Vec target)
01032 {
01033     if (compute_training_error)
01034     {
01035         PLASSERT(train_set);
01036         int n=train_set->length();
01037         PP<ProgressBar> pb;
01038         if(report_progress) pb = new ProgressBar("computing weighted training error of whole model",n);
01039         train_stats->forget();
01040         Vec err(nTrainCosts());
01041         int nb_class_0=0;
01042         int nb_class_1=0;
01043         real cum_weights_0=0;
01044         real cum_weights_1=0;
01045 
01046         bool save_forward_sub_learner_test_costs = 
01047             forward_sub_learner_test_costs;
01048         forward_sub_learner_test_costs=false;
01049         real weight;
01050         for (int i=0;i<n;i++)
01051         {
01052             if(report_progress) pb->update(i);
01053             train_set->getExample(i, input, target, weight);
01054             computeCostsOnly(input,target,err);
01055             if(fast_is_equal(target[0],0.)){
01056                 cum_weights_0 += example_weights[i];
01057                 nb_class_0++;
01058             }else{
01059                 cum_weights_1 += example_weights[i];
01060                 nb_class_1++;
01061             }
01062             err[3]=cum_weights_0/nb_class_0;
01063             err[4]=cum_weights_1/nb_class_1;
01064             train_stats->update(err);
01065         }
01066         train_stats->finalize();
01067         forward_sub_learner_test_costs = 
01068             save_forward_sub_learner_test_costs;
01069 
01070         if (verbosity>2)
01071             NORMAL_LOG << "At stage " << stage << 
01072                 " boosted (weighted) classification error on training set = " 
01073                        << train_stats->getMean() << endl;
01074      
01075     }
01076 }
01077 
01078 void AdaBoost::setTrainingSet(VMat training_set, bool call_forget)
01079 { 
01080     PLCHECK(weak_learner_template);
01081     
01082     if(weak_learner_template->classname()=="RegressionTree"){
01083         // We do this as an optimization. Otherwise we would create a RegressionTreeRegisters
01084         // for each weak_learner, which is time consuming as it sorts the dataset.
01085         if(training_set->classname()!="RegressionTreeRegisters")
01086             training_set = new RegressionTreeRegisters(training_set,
01087                                                        report_progress,
01088                                                        verbosity,
01089                                                        !finalized, !finalized);
01090 
01091         // We need to change the weights of the training set in place to reuse the RegressionTreeRegisters.
01092         if(!modif_train_set_weights){
01093             if(training_set->weightsize()==1)
01094                 modif_train_set_weights=1;
01095             else
01096                 NORMAL_LOG<<"In AdaBoost::setTrainingSet() -"
01097                           <<" We have RegressionTree as weak_learner, but the"
01098                           <<" training_set doesn't have a weight. This will cause"
01099                           <<" the creation of a RegressionTreeRegisters at"
01100                           <<" each stage of AdaBoost!";
01101         }
01102         // We do this as RegressionTreeNode needs a train_set for getTestCostNames.
01103         if(!weak_learner_template->getTrainingSet())
01104             weak_learner_template->setTrainingSet(training_set,call_forget);
01105         for(int i=0;i<weak_learners.length();i++)
01106             if(!weak_learners[i]->getTrainingSet())
01107                 weak_learners[i]->setTrainingSet(training_set,call_forget);
01108         
01109     }
01110 
01111     inherited::setTrainingSet(training_set, call_forget);
01112 }
01113 
01114 } // end of namespace PLearn
01115 
01116 
01117 /*
01118   Local Variables:
01119   mode:c++
01120   c-basic-offset:4
01121   c-file-style:"stroustrup"
01122   c-file-offsets:((innamespace . 0)(inline-open . 0))
01123   indent-tabs-mode:nil
01124   fill-column:79
01125   End:
01126 */
01127 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :