// -*- C++ -*-

// StackedLearner.cc
//
// Copyright (C) 2003 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: StackedLearner.cc 9688 2008-11-14 21:37:42Z ducharme $
 ******************************************************* */

// Authors: Yoshua Bengio

#include "StackedLearner.h"
#include <plearn/vmat/PLearnerOutputVMatrix.h>
#include <plearn/vmat/ShiftAndRescaleVMatrix.h>
#include <plearn/base/stringutils.h>
#include <plearn/vmat/SeparateInputVMatrix.h>

namespace PLearn {
using namespace std;

StackedLearner::StackedLearner()
    : default_operation("mean"),
      base_train_splitter(0),
      train_base_learners(true),
      normalize_base_learners_output(false),
      precompute_base_learners_output(false),
      put_raw_input(false),
      share_learner(false),
      nsep(1)
{ }

PLEARN_IMPLEMENT_OBJECT(
    StackedLearner,
    "Implements stacking, which combines two levels of learners, the 2nd level using the 1st level's outputs as inputs",
    "NOTE: If you need a simple mechanism for chaining multiple learners, consider using ChainedLearners instead.\n"
    "Stacking is a generic strategy in which two levels (or more, recursively) of learners\n"
    "are combined. The lower level may have one or more learners, and they may be trained\n"
    "on the same or different data from the upper level's single learner. A shared learner can\n"
    "also be trained on different parts of the input. The outputs of the\n"
    "1st level learners are concatenated and serve as inputs to the second level learner.\n"
    "\n"
    "Contrary to previous versions, it is now PERMITTED for each learner to\n"
    "have a different outputsize() if an explicit combiner is in use.  We assume\n"
    "that the combiner knows what to do.\n"
    "\n"
    "There is also the option to copy the inputs of the 1st level learners as additional\n"
    "inputs for the second level (put_raw_input).  If requested, the raw inputs are\n"
    "appended AT THE END of the combiner input vector.\n"
    "\n"
    "A Splitter can optionally be provided to specify how to split the data into\n"
    "the training/validation sets for the lower and upper levels respectively.\n"
    );
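
// A minimal usage sketch (hypothetical: 'make_base_learner()' stands in for
// any factory returning a buildable PP<PLearner>; the rest is the API used
// in this file):
//
//     PP<StackedLearner> stacked = new StackedLearner();
//     stacked->base_learners.append(make_base_learner());
//     stacked->base_learners.append(make_base_learner());
//     stacked->default_operation = "mean"; // no combiner: average base outputs
//     stacked->build();
//     stacked->setTrainingSet(trainset);   // trainset: some VMat
//     stacked->train();
//     Vec output(stacked->outputsize());
//     stacked->computeOutput(input, output);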

void StackedLearner::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    declareOption(ol, "base_learners", &StackedLearner::base_learners, OptionBase::buildoption,
                  "A set of 1st level base learners that are independently trained (here or elsewhere)\n"
                  "and whose outputs will serve as inputs to the combiner (2nd level learner).");

    declareOption(ol, "combiner", &StackedLearner::combiner, OptionBase::buildoption,
                  "A learner that is trained (possibly on a data set different from the\n"
                  "one used to train the base_learners) using the outputs of the\n"
                  "base_learners as inputs. If it is not provided, then the StackedLearner\n"
                  "simply performs \"default_operation\" on the outputs of the base_learners.\n");

    declareOption(ol, "default_operation", &StackedLearner::default_operation,
                  OptionBase::buildoption,
                  "If no combiner is provided, the simple operation to be performed\n"
                  "on the outputs of the base_learners.\n"
                  "Supported: mean (default), min, max, variance, sum, sumofsquares, dmode (majority vote)\n");

    declareOption(ol, "splitter", &StackedLearner::splitter, OptionBase::buildoption,
                  "A Splitter used to select which data subset(s) go to training the base_learners\n"
                  "and which data subset(s) go to training the combiner. If it is not provided, the\n"
                  "same data is used to train both levels. If provided, each split should contain\n"
                  "two sets: the set on which to train the first level and the set on which to\n"
                  "train the second one.\n");

    declareOption(ol, "base_train_splitter", &StackedLearner::base_train_splitter, OptionBase::buildoption,
                  "This splitter can be used to split the training set into different training sets for each base learner.\n"
                  "If it is not set, the same training set will be applied to all base learners.\n"
                  "If \"splitter\" is also used, it will be applied first to determine the training set used by base_train_splitter.\n"
                  "The splitter should give as many splits as base learners, and each split should contain one set.");

    declareOption(ol, "train_base_learners", &StackedLearner::train_base_learners, OptionBase::buildoption,
                  "Whether to train the base learners in the method train() (otherwise they should be\n"
                  "initialized properly at construction / setOption time).\n");

    declareOption(ol, "normalize_base_learners_output", &StackedLearner::normalize_base_learners_output, OptionBase::buildoption,
                  "If set to 1, the output of the base learners on the combiner training set\n"
                  "will be normalized (zero mean, unit variance) before training the combiner.");

    declareOption(ol, "precompute_base_learners_output", &StackedLearner::precompute_base_learners_output, OptionBase::buildoption,
                  "If set to 1, the output of the base learners on the combiner training set\n"
                  "will be precomputed in memory before training the combiner (this may\n"
                  "significantly speed up the combiner training process).");

    declareOption(ol, "put_raw_input", &StackedLearner::put_raw_input, OptionBase::buildoption,
                  "Whether to feed the raw inputs to the combiner, in addition to the base\n"
                  "learners' outputs. The raw inputs are appended AT THE END of the\n"
                  "combiner input vector.\n");

    declareOption(ol, "share_learner", &StackedLearner::share_learner, OptionBase::buildoption,
                  "If set to 1, the input is divided into nsep equal parts, and a common learner\n"
                  "is trained as if each part constitutes a training example.");

    declareOption(ol, "nsep", &StackedLearner::nsep, OptionBase::buildoption,
                  "Number of input separations. The input size needs to be a "
                  "multiple of that value.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
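
// A sketch of how these options might appear in a PLearn script (hypothetical:
// the exact serialization syntax should be checked against the PLearn
// documentation, and the learner specifications below are placeholders):
//
//     StackedLearner(
//         base_learners = [ ...1st-level learner specs... ];
//         combiner = ...2nd-level learner spec...;
//         precompute_base_learners_output = 1;
//     )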

void StackedLearner::setTrainStatsCollector(PP<VecStatsCollector> statscol)
{
    inherited::setTrainStatsCollector(statscol);
    if (combiner)
        combiner->setTrainStatsCollector(statscol);
}

void StackedLearner::build_()
{
    if (base_learners.size() == 0)
        PLERROR("StackedLearner::build_: no base learners specified!  Use the "
                "'base_learners' option");

    if (splitter && splitter->nSetsPerSplit() != 2)
        PLERROR("StackedLearner: the Splitter should produce only two sets per split, got %d",
                splitter->nSetsPerSplit());
    if (share_learner && base_train_splitter)
        PLERROR("StackedLearner::build_: options 'base_train_splitter' and 'share_learner'\n"
                "cannot both be true");

    resizeBaseLearnersOutputs();
    default_operation = lowerstring( default_operation );
}

// ### Nothing to add here, simply calls build_
void StackedLearner::build()
{
    inherited::build();
    build_();
}


void StackedLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    deepCopyField(base_learners_outputs,     copies);
    deepCopyField(all_base_learners_outputs, copies);
    deepCopyField(base_learners,             copies);
    deepCopyField(combiner,                  copies);
    deepCopyField(splitter,                  copies);
    deepCopyField(base_train_splitter,       copies);

    inherited::makeDeepCopyFromShallowCopy(copies);
}


void StackedLearner::setExperimentDirectory(const PPath& the_expdir)
{
    if (the_expdir != "") {
        for (int i=0, n=base_learners.size() ; i<n ; ++i)
            base_learners[i]->setExperimentDirectory(the_expdir /
                                                     "Base"+tostring(i));
        if (combiner)
            combiner->setExperimentDirectory(the_expdir / "Combiner");
    }
}


int StackedLearner::outputsize() const
{
    // Compute and return the size of this learner's output (which typically
    // may depend on its inputsize(), targetsize() and set options).
    if (combiner)
        return combiner->outputsize();
    else
        return base_learners[0]->outputsize();
}

void StackedLearner::forget()
{
    if (train_base_learners)
        for (int i=0;i<base_learners.length();i++)
            base_learners[i]->forget();
    if (combiner)
        combiner->forget();
}

void StackedLearner::setTrainingSet(VMat training_set, bool call_forget)
{
    inherited::setTrainingSet(training_set, call_forget);

    if (splitter) {
        splitter->setDataSet(training_set);
        if (splitter->nsplits() != 1)
            PLERROR("In StackedLearner::setTrainingSet - "
                    "The splitter provided should only return one split");

        // Split[0] goes to the base learners; Split[1] goes to the combiner.
        TVec<VMat> sets = splitter->getSplit();
        setBaseLearnersTrainingSet(sets[0], call_forget);
        setCombinerTrainingSet    (sets[1], call_forget);
    }
    else {
        setBaseLearnersTrainingSet(training_set, call_forget);
        setCombinerTrainingSet    (training_set, call_forget);
    }

    // Changing the training set may change the outputsize of the base learners.
    resizeBaseLearnersOutputs();
}
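
// For example (a sketch, assuming a splitter along the lines of PLearn's
// FractionSplitter configured to return a single split containing two sets),
// a split such as { first half of the data, second half of the data } trains
// the base learners on the first half and the combiner on the second half.
// This is the usual stacking protocol: the combiner is fit on outputs that
// the base learners did not train on.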

void StackedLearner::train()
{
    if (!train_stats)
        PLERROR("StackedLearner::train: train_stats has not been set!");

    if (splitter && splitter->nsplits() != 1)
        PLERROR("StackedLearner: multi-splits case not implemented yet");

    // --- PART 1: TRAIN THE BASE LEARNERS ---
    if (train_base_learners) {
        if (stage == 0) {
            for (int i=0;i<base_learners.length();i++)
            {
                PP<VecStatsCollector> stats = new VecStatsCollector();
                base_learners[i]->setTrainStatsCollector(stats);
                base_learners[i]->nstages = nstages;
                base_learners[i]->train();
                stats->finalize(); // WE COULD OPTIONALLY SAVE THEM AS WELL!
            }
            stage++;
        }
        else
            for (int i=0;i<base_learners.length();i++)
            {
                base_learners[i]->nstages = nstages;
                base_learners[i]->train();
            }
    }

    // --- PART 2: TRAIN THE COMBINER ---
    if (combiner)
    {
        if (normalize_base_learners_output) {
            // Normalize the combiner training set.
            VMat normalized_trainset =
                new ShiftAndRescaleVMatrix(combiner->getTrainingSet(), -1);
            combiner->setTrainingSet(normalized_trainset);
        }
        if (precompute_base_learners_output) {
            // First precompute the train set of the combiner in memory.
            VMat precomputed_trainset = combiner->getTrainingSet();
            precomputed_trainset.precompute();
            combiner->setTrainingSet(precomputed_trainset, false);
        }
        combiner->setTrainStatsCollector(train_stats);
        combiner->train();
    }
}


void StackedLearner::computeOutput(const Vec& input, Vec& output) const
{
    all_base_learners_outputs.resize(0);
    if (share_learner) {
        for (int i=0;i<nsep;i++) {
            if (!base_learners[0])
                PLERROR("StackedLearner::computeOutput: base learners have not been created!");
            base_learners_outputs[i].resize(base_learners[0]->outputsize());
            base_learners[0]->computeOutput(input.subVec(i*input.length() / nsep,
                                                         input.length() / nsep),
                                            base_learners_outputs[i]);

            // append() will be costly only the first time computeOutputAndCosts
            // is called; afterwards storage will NOT be reallocated
            all_base_learners_outputs.append(base_learners_outputs[i]);
        }
    }
    else {
        for (int i=0;i<base_learners.length();i++) {
            if (!base_learners[i])
                PLERROR("StackedLearner::computeOutput: base learners have not been created!");
            base_learners_outputs[i].resize(base_learners[i]->outputsize());
            base_learners[i]->computeOutput(input, base_learners_outputs[i]);

            // append() will be costly only the first time computeOutputAndCosts
            // is called; afterwards storage will NOT be reallocated
            all_base_learners_outputs.append(base_learners_outputs[i]);
        }
    }

    if (put_raw_input)
        all_base_learners_outputs.append(input);

    if (combiner)
        combiner->computeOutput(all_base_learners_outputs, output);

    else // just perform default_operation on the outputs
    {
        // This is a bit inconvenient... Make it a temporary matrix
        // (one row per base learner output, one column per output dimension).
        // If it's often needed, I'll optimize it further.  --Nicolas
        PLASSERT( base_learners_outputs.size() > 0 );
        Mat base_outputs_mat(base_learners_outputs.size(),
                             base_learners[0]->outputsize());
        for (int i=0, n=base_learners_outputs.size() ; i<n ; ++i)
            base_outputs_mat(i) << base_learners_outputs[i];

        if( default_operation == "mean" )
            columnMean(base_outputs_mat, output);
        else if( default_operation == "min" )
            columnMin(base_outputs_mat, output);
        else if( default_operation == "max" )
            columnMax(base_outputs_mat, output);
        else if( default_operation == "sum" )
            columnSum(base_outputs_mat, output);
        else if( default_operation == "sumofsquares" )
            columnSumOfSquares(base_outputs_mat, output);
        else if( default_operation == "variance" )
        {
            Vec mean;
            columnMean(base_outputs_mat, mean);
            columnVariance(base_outputs_mat, output, mean);
        }
        else if( default_operation == "dmode")
        {
            // Size the collector from the number of rows of base_outputs_mat,
            // which also covers the share_learner case (nsep rows rather than
            // one row per base learner).
            StatsCollector sc(base_learners_outputs.length());
            for(int o=0; o<output.length(); o++)
            {
                sc.forget();
                for(int j=0; j<base_outputs_mat.length(); j++)
                    sc.update(base_outputs_mat(j,o),1); // row = learner j, column = output o
                output[o] = sc.dmode();
            }
        }
        else
            PLERROR("StackedLearner::computeOutput: unsupported default_operation");
    }
}
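
// Worked example of the no-combiner path (numbers illustrative only): with
// three base learners of outputsize 2 producing rows [1,4], [3,0] and [2,2],
// base_outputs_mat is 3x2; default_operation="mean" then yields the
// column-wise mean [2,2], "max" yields [3,4], and "sum" yields [6,6].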

void StackedLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                             const Vec& target, Vec& costs) const
{
    if (combiner)
        combiner->computeCostsFromOutputs(all_base_learners_outputs,
                                          output,target,costs);
    else // cheat: reuse the first base learner's cost function
    {
        if (share_learner)
            base_learners[0]->computeCostsFromOutputs(input.subVec(0,input.length() / nsep),
                                                      output, target, costs);
        else
            base_learners[0]->computeCostsFromOutputs(input,output,target,costs);
    }
}

bool StackedLearner::computeConfidenceFromOutput(const Vec& input, const Vec& output,
                                                 real probability,
                                                 TVec< pair<real,real> >& intervals) const
{
    if (! combiner)
        PLERROR("StackedLearner::computeConfidenceFromOutput: a 'combiner' must be specified "
                "in order to compute confidence intervals.");

    all_base_learners_outputs.resize(0);
    if (share_learner)
    {
        for (int i=0;i<nsep;i++)
        {
            if (!base_learners[0])
                PLERROR("StackedLearner::computeConfidenceFromOutput: base learners have not been created!");
            // Resize entry i (not entry 0), since computeOutput writes into entry i.
            base_learners_outputs[i].resize(base_learners[0]->outputsize());
            base_learners[0]->computeOutput(input.subVec(i*input.length()/nsep,
                                                         input.length()/nsep),
                                            base_learners_outputs[i]);

            all_base_learners_outputs.append(base_learners_outputs[i]);
        }
    }
    else
    {
        for (int i=0;i<base_learners.length();i++)
        {
            if (!base_learners[i])
                PLERROR("StackedLearner::computeConfidenceFromOutput: base learners have not been created!");
            base_learners_outputs[i].resize(base_learners[i]->outputsize());
            base_learners[i]->computeOutput(input, base_learners_outputs[i]);

            all_base_learners_outputs.append(base_learners_outputs[i]);
        }
    }

    if (put_raw_input)
        all_base_learners_outputs.append(input);

    return combiner->computeConfidenceFromOutput(all_base_learners_outputs,
                                                 output, probability, intervals);
}

TVec<string> StackedLearner::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by getTrainCostNames)
    if (combiner)
        return combiner->getTestCostNames();
    else
        return base_learners[0]->getTestCostNames();
}

TVec<string> StackedLearner::getTrainCostNames() const
{
    // Return the names of the objective costs that the train method computes and
    // for which it updates the VecStatsCollector train_stats
    if (combiner)
        return combiner->getTrainCostNames();
    else
        return base_learners[0]->getTrainCostNames();
}


///////////////////////////////
// resizeBaseLearnersOutputs //
///////////////////////////////
void StackedLearner::resizeBaseLearnersOutputs() {
    // Ensure that all base learners have the same outputsize if we don't use
    // a combiner.
    PLASSERT( base_learners.size() > 0 && base_learners[0] );
    if (! combiner && ! share_learner) {
        int outputsize = base_learners[0]->outputsize();
        if (outputsize > 0) {
            for (int i=1, n=base_learners.size() ; i<n ; ++i)
                if (base_learners[i]->outputsize() != outputsize)
                    PLERROR("StackedLearner::resizeBaseLearnersOutputs: base learner #%d does not have the same "
                            "outputsize (=%d) as base learner #0 (=%d); all outputsizes for "
                            "base learners must be identical",
                            i, base_learners[i]->outputsize(), outputsize);
        }
    }

    if (share_learner)
        base_learners_outputs.resize(nsep);
    else
        base_learners_outputs.resize(base_learners.size());
}


void StackedLearner::setBaseLearnersTrainingSet(VMat base_trainset, bool call_forget)
{
    PLASSERT( base_learners.size() > 0 );

    // Handle parameter sharing
    if (share_learner) {
        base_learners[0]->setTrainingSet(
            new SeparateInputVMatrix(base_trainset, nsep),
            call_forget && train_base_learners);
    }
    else {
        if (base_train_splitter) {
            // Handle base splitter
            base_train_splitter->setDataSet(base_trainset);
            for (int i=0;i<base_learners.length();i++) {
                base_learners[i]->setTrainingSet(base_train_splitter->getSplit(i)[0],
                                                 call_forget && train_base_learners);
            }
        }
        else {
            // Default situation: set the same training set into each base learner
            for (int i=0;i<base_learners.length();i++)
                base_learners[i]->setTrainingSet(base_trainset,
                                                 call_forget && train_base_learners);
        }
    }
}

void StackedLearner::setCombinerTrainingSet(VMat comb_trainset, bool call_forget)
{
    // Handle combiner
    if (combiner) {
        VMat effective_trainset = comb_trainset;
        if (share_learner)
            effective_trainset = new SeparateInputVMatrix(comb_trainset, nsep);

        combiner->setTrainingSet(
            new PLearnerOutputVMatrix(effective_trainset, base_learners, put_raw_input),
            call_forget);
    }
}
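
// Combiner input layout (illustrative example): with two base learners of
// outputsize 3 and put_raw_input=true on inputs of size 5, each row that the
// combiner sees is
//     [ learner0 outputs (3) | learner1 outputs (3) | raw input (5) ]
// i.e. an inputsize of 11, with the raw input appended last, as documented
// in the 'put_raw_input' option above.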


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :