// -*- C++ -*-

// PTester.cc
//
// Copyright (C) 2002 Pascal Vincent, Frederic Morin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: PTester.cc 10220 2009-05-27 20:40:20Z tihocan $
 ******************************************************* */

#include "PTester.h"
#include <plearn/io/load_and_save.h>
#include <plearn/io/openString.h>
#include <plearn/io/openFile.h>
#include <plearn/math/VecStatsCollector.h>
#include <plearn/vmat/FileVMatrix.h>
#include <plearn/vmat/MemoryVMatrix.h>
#include <assert.h>
#include <plearn/base/RemoteDeclareMethod.h>
#include <plearn_learners/hyper/HyperLearner.h>

#include <plearn/misc/PLearnService.h>

#include <plearn/base/stringutils.h>
#if USING_MPI
#include <plearn/sys/PLMPI.h>
#endif

namespace PLearn {
using namespace std;

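// Returns a copy of 'names' with each element wrapped as prefix+name+postfix.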
TVec<string> addprepostfix(const string& prefix, const TVec<string>& names, const string& postfix)
{
    TVec<string> newnames(names.size());
    TVec<string>::const_iterator it = names.begin();
    TVec<string>::iterator newit = newnames.begin();
    while(it!=names.end())
    {
        *newit = prefix + *it + postfix;
        ++it;
        ++newit;
    }
    return newnames;
}

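// Prepends a single element to a vector: x & [a,b,c] yields [x,a,b,c].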
template<class T> TVec<T> operator&(const T& x, const TVec<T>& v)
{
    int l = v.size();
    TVec<T> res(1+l);
    res[0] = x;
    res.subVec(1,l) << v;
    return res;
}

// PTester //
PTester::PTester():
       reloaded(false),
       need_to_save_test_names(false),
       save_mode_(PStream::plearn_ascii),
       save_mode("plearn_ascii"),
       provide_learner_expdir(false),
       report_stats(true),
       save_data_sets(false),
       save_initial_learners(false),
       save_initial_tester(true),
       save_learners(true),
       save_stat_collectors(true),
       save_split_stats(true),
       save_test_costs(false),
       save_test_outputs(false),
       save_test_names(true),
       call_forget_in_run(true),
       save_test_confidence(false),
       should_train(true),
       should_test(true),
       finalize_learner(false),
       enforce_clean_expdir(true),
       redirect_stdout(false),
       redirect_stderr(false),
       parallelize_here(true)
{}
PLEARN_IMPLEMENT_OBJECT(
    PTester,
    "Manages a learning experiment, with training and estimation of generalization error.",
    "The PTester class allows you to describe a typical learning experiment that you wish to perform, \n"
    "as the training/testing of a learning algorithm on a particular dataset.\n"
    "The splitter is used to obtain one or several (such as for k-fold) splits of the dataset, \n"
    "and training/testing is performed on each split. \n"
    "Requested statistics are computed, and all requested results are written in an appropriate \n"
    "file inside the specified experiment directory. \n"
    "Statistics can be either specified entirely from the 'statnames' option, or built from\n"
    "'statnames' and 'statmask'. For instance, one may set:\n"
    "   statnames = [ \"NLL\" \"mse\" ]\n"
    "   statmask  = [ [ \"E[*]\" ] [ \"test#1-2#.*\" ] [ \"E[*]\" \"STDERROR[*]\" ] ]\n"
    "and this will compute:\n"
    "   E[test1.E[NLL]], STDERROR[test1.E[NLL]], E[test2.E[NLL]], STDERROR[test2.E[NLL]]\n"
    "   E[test1.E[mse]], STDERROR[test1.E[mse]], E[test2.E[mse]], STDERROR[test2.E[mse]]\n"
    );
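// Example: a minimal PTester specification in a .plearn script (a sketch
// only; AutoVMatrix and KFoldSplitter are illustrative choices of dataset
// and splitter classes, and the learner is left to be filled in):
//
//     PTester(
//         expdir = "exp/my-experiment";
//         dataset = AutoVMatrix(specification = "data.amat");
//         splitter = KFoldSplitter(K = 5);
//         learner = ...;  // any PLearner subclass
//         statnames = [ "E[test1.E[mse]]" "STDERROR[test1.E[mse]]" ];
//     );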


void PTester::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "expdir", &PTester::expdir, OptionBase::buildoption,
        "Path of this tester's directory in which to save all tester results.\n"
        "The directory will be created if it does not already exist.\n"
        "If this is an empty string, no directory is created and no output file is generated.\n");

    declareOption(
        ol, "dataset", &PTester::dataset, OptionBase::buildoption,
        "The dataset to use to generate splits. \n"
        "(This is ignored if your splitter is an ExplicitSplitter.)\n"
        "Datasets are seen as matrices whose columns or fields are laid out as \n"
        "follows: a number of input fields, followed by (optional) target fields, \n"
        "followed by an (optional) weight field (to weigh each example).\n"
        "The sizes of those areas are given by the VMatrix options \n"
        "inputsize, targetsize, and weightsize, which are typically used by the \n"
        "learner upon building.\n");

    declareOption(
        ol, "splitter", &PTester::splitter, OptionBase::buildoption,
        "The splitter to use to generate one or several train/test tuples from the dataset.");

    declareOption(
        ol, "statnames", &PTester::statnames, OptionBase::buildoption,
        "A list of global statistics we are interested in.\n"
        "These are strings of the form S1[dataset.S2[cost_name]] where:\n"
        "  - dataset is train or test1 or test2 ... (train being \n"
        "    the first dataset in a split, test1 the second, ...) \n"
        "  - cost_name is one of the training or test cost names (depending on dataset) understood \n"
        "    by the underlying learner (see its getTrainCostNames and getTestCostNames methods) \n"
        "  - S1 and S2 are statistics, i.e. one of: E (expectation), V (variance), MIN, MAX, STDDEV, ... \n"
        "    S2 is computed over the samples of a given dataset split. S1 is over the splits. \n"
        "They can also be strings of the form S1[dataset.perf_evaluator_name.cost_name] \n"
        "(see option perf_evaluators) \n");

    declareOption(
        ol, "statmask", &PTester::statmask, OptionBase::buildoption,
        "A list of lists of masks. If provided, each of the lists is used to compose the statnames_processed.\n"
        "If not provided, the statnames are those in the 'statnames' list. See the class help for an example.\n");

    declareOption(
        ol, "learner", &PTester::learner, OptionBase::buildoption,
        "The learner to train/test.\n");

    declareOption(
        ol, "perf_evaluators", &PTester::perf_evaluators, OptionBase::buildoption,
        "If specified, the performance evaluations returned by these named performance evaluators\n"
        "will be appended to the list of cost statistics computed by the learner's test method.\n"
        "They will be accessible through the syntax: perf_evaluator_name.cost_name \n");

    declareOption(
        ol, "report_stats", &PTester::report_stats, OptionBase::buildoption,
        "If true, the computed global statistics specified in statnames will be saved in global_stats.pmat \n"
        "and the corresponding per-split statistics will be saved in split_stats.pmat (see save_split_stats). \n"
        "For reference, all cost names can be saved with the option save_test_names.");

    declareOption(
        ol, "save_initial_tester", &PTester::save_initial_tester, OptionBase::buildoption,
        "If true, this PTester object will be saved in its initial state in tester.psave \n"
        "Thus if the initial .plearn file gets lost or modified, we can always see what this tester was.\n");

    declareOption(
        ol, "save_stat_collectors", &PTester::save_stat_collectors, OptionBase::buildoption,
        "If true, stat collectors for split#k will be saved in Split#k/train_stats.psave and Split#k/test#i_stats.psave");

    declareOption(
        ol, "save_split_stats", &PTester::save_split_stats, OptionBase::buildoption,
        "If true, will generate the file split_stats.pmat that contains stats about each split.");

    declareOption(
        ol, "save_learners", &PTester::save_learners, OptionBase::buildoption,
        "If true, the final trained learner for split#k will be saved in Split#k/final_learner.psave.\n"
        "The format is defined by save_mode.");

    declareOption(
        ol, "save_mode", &PTester::save_mode, OptionBase::buildoption,
        "The mode to use to save the file.");

    declareOption(
        ol, "save_initial_learners", &PTester::save_initial_learners, OptionBase::buildoption,
        "If true, the initial untrained learner for split#k (just after forget() has been called) will be saved in Split#k/initial_learner.psave");

    declareOption(
        ol, "save_data_sets", &PTester::save_data_sets, OptionBase::buildoption,
        "If true, the data set generated for split #k will be saved as Split#k/training_set.vmat Split#k/test1_set.vmat ...");

    declareOption(
        ol, "save_test_outputs", &PTester::save_test_outputs, OptionBase::buildoption,
        "If true, the outputs of the test for split #k will be saved in Split#k/test#i_outputs.pmat");

    declareOption(
        ol, "call_forget_in_run", &PTester::call_forget_in_run, OptionBase::buildoption,
        "Whether run() should make perform() call forget() on the learner before training.\n"
        "(A false value is overridden when there is more than one split.)\n");

    declareOption(
        ol, "save_test_costs", &PTester::save_test_costs, OptionBase::buildoption,
        "If true, the costs of the test for split #k will be saved in Split#k/test#i_costs.pmat");

    declareOption(
        ol, "save_test_names", &PTester::save_test_names, OptionBase::buildoption,
        "For reference, all cost names (as given by the learner's getTrainCostNames() and getTestCostNames()) \n"
        "will be reported in files train_cost_names.txt and test_cost_names.txt");

    declareOption(
        ol, "provide_learner_expdir", &PTester::provide_learner_expdir, OptionBase::buildoption,
        "If true, each learner to be trained will have its experiment directory set to Split#k/LearnerExpdir/");

    declareOption(
        ol, "should_train", &PTester::should_train, OptionBase::buildoption,
        "If true, the learners are trained, otherwise only tested (in that case it is advised\n"
        "to load an already trained learner in the 'learner' field)");

    declareOption(
        ol, "train", &PTester::should_train,
        OptionBase::learntoption | OptionBase::nosave,
        "DEPRECATED - This option has been renamed to 'should_train' in\n"
        "order to make it consistent with the 'should_test' option.");

    declareOption(
        ol, "should_test", &PTester::should_test, OptionBase::buildoption,
        "Whether to carry out the test at all. This can be used, for instance,\n"
        "to train only (without testing) and save the learners, and test later. \n"
        "If 'should_test' is false, any test statistics that are requested\n"
        "yield MISSING_VALUE.\n");

    declareOption(
        ol, "finalize_learner", &PTester::finalize_learner,
        OptionBase::buildoption,
        "If true, the learner will be finalized after training (default: false).");

    declareOption(
        ol, "template_stats_collector", &PTester::template_stats_collector, OptionBase::buildoption,
        "If provided, this instance of a subclass of VecStatsCollector will be used as a template\n"
        "to build all the stats collectors used during training and testing of the learner");

    declareOption(
        ol, "global_template_stats_collector", &PTester::global_template_stats_collector, OptionBase::buildoption,
        "If provided, this instance of a subclass of VecStatsCollector will be used as a template\n"
        "to build the global stats collector that collects statistics over splits");

    declareOption(
        ol, "final_commands", &PTester::final_commands, OptionBase::buildoption,
        "If provided, the shell commands given will be executed after training is completed");

    declareOption(
        ol, "save_test_confidence", &PTester::save_test_confidence,
        OptionBase::buildoption,
        "Whether to save confidence intervals for the test outputs;\n"
        "makes sense mostly if 'save_test_outputs' is also true.  The\n"
        "intervals are saved in a file SETNAME_confidence.pmat (default=false)");

    declareOption(
        ol, "enforce_clean_expdir", &PTester::enforce_clean_expdir,
        OptionBase::buildoption,
        "If this option is true, the PTester ensures that the expdir does not\n"
        "already exist when the experiment is started, and gives a PLerror\n"
        "otherwise.  This is the usual and traditional default behavior for\n"
        "PTester.  However, in some contexts, one KNOWS that the expdir is brand\n"
        "new (e.g. generated by plargs.expdir in a PTester), and might contain\n"
        "some precomputed results that are being generated as the model is\n"
        "loaded, so it is not empty.  In those contexts, it makes sense to allow\n"
        "this option to be false.\n");

    declareOption(
        ol, "redirect_stdout", &PTester::redirect_stdout, OptionBase::buildoption,
        "If true, stdout will be redirected to expdir/stdout.");

    declareOption(
        ol, "redirect_stderr", &PTester::redirect_stderr, OptionBase::buildoption,
        "If true, stderr will be redirected to expdir/stderr.");

    declareOption(
        ol, "parallelize_here", &PTester::parallelize_here, OptionBase::buildoption | OptionBase::nosave,
        "Reserve remote servers at this level if true.");

    inherited::declareOptions(ol);
}

void PTester::declareMethods(RemoteMethodMap& rmm)
{
    // Insert a backpointer to remote methods; note that this is
    // different from declareOptions().
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(
        rmm, "perform", &PTester::perform,
        (BodyDoc("Performs the test, and returns the global stats specified in statnames.\n"
                 "If 'call_forget' is set to false then the call to setTrainingSet()\n"
                 "won't call forget and build.  This is useful for continuation of an\n"
                 "incremental training (such as after increasing the number of epochs\n"
                 "(nstages)), or generally when trying different option values that\n"
                 "don't require the learning to be restarted from scratch.  However\n"
                 "call_forget will be forced to true (even if passed as false) if the\n"
                 "splitter returns more than one split.\n"),
         ArgDoc ("call_forget", "Whether forget() should be called in setTrainingSet()."),
         RetDoc ("Vector of test statistics corresponding to the requested statnames")));

    declareMethod(
        rmm, "perform1Split", &PTester::perform1Split,
        (BodyDoc("Performs train/test for one split, and returns the split's statistics."),
         ArgDoc ("splitnum","Split number on which to perform train/test"),
         ArgDoc ("call_forget","Whether forget() should be called in setTrainingSet()."),
         RetDoc ("Vector of test statistics corresponding to the requested statnames")));

    declareMethod(
        rmm, "getStatNames", &PTester::getStatNames,
        (BodyDoc("Return the statnames (potentially modified by statmask, if provided);\n"
                 "see the 'statnames' and 'statmask' options."),
         RetDoc ("Name of computed statistics.")));

    declareMethod(
        rmm, "setExperimentDirectory", &PTester::setExperimentDirectory,
        (BodyDoc("The experiment directory is the directory in which files related to\n"
                 "this model are to be saved.  If it is an empty string, it is understood\n"
                 "to mean that the user doesn't want any file created by this learner.\n"),
         ArgDoc ("expdir", "Directory name where experimental results should be saved")));

    declareMethod(
        rmm, "getExperimentDirectory", &PTester::getExperimentDirectory,
        (BodyDoc("Return the currently-set experiment directory (see setExperimentDirectory)."),
         RetDoc ("Current expdir.")));
}


void PTester::build_()
{

#if USING_MPI
    if (PLMPI::rank!=0)
        expdir = "";
#endif

    if(!reloaded && learner && learner->classname()=="HyperLearner"){
        if(expdir.isEmpty()){
            PLWARNING("PTester::build_() - no expdir. Can't reload.");
            return;
        }
        PPath f = expdir/"Split0"/"LearnerExpdir"/"hyper_learner_auto_save.psave";
        bool isf=isfile(f);
        if(!reloaded && isf){
            if(splitter->nsplits()!=1){
                PLERROR("In PTester::build_() - The auto_save feature only works when there is a single split.");
                //TODO: this only works when there is a single split
            }
            Profiler::pl_profile_start("PTester::auto_load");
            PLWARNING("In PTester::build_() - reloading from file %s",f.c_str());
            HyperLearner *l = new HyperLearner();
            PLearn::load(f,l);
            l->reloaded=true;
            learner=l;
            reloaded = true;
            Profiler::pl_profile_end("PTester::auto_load");
        }
    }

    statnames_processed.resize(statnames.length());
    statnames_processed << statnames;
    if (statmask) {
        // First process statmask to remove potential ranges, like test#1-3#.
        // The result is stored in the 'sm' variable.
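        // e.g. the mask "test#1-3#.E[*]" expands to "test1.E[*]",
        // "test2.E[*]" and "test3.E[*]".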
        TVec< TVec<string> > sm(statmask.length());
        for (int i = 0; i < statmask.length(); i++) {
            for (int j = 0; j < statmask[i].length(); j++) {
                string mask = statmask[i][j];
                size_t pos;
                bool is_range = false;
                if ((pos = mask.find('#')) != string::npos) {
                    // There is a '#' character.
                    size_t pos2;
                    if ((pos2 = mask.find('#', pos + 1)) != string::npos) {
                        // There is a second '#' character.
                        vector<string> range = split(mask.substr(pos + 1, pos2 - pos - 1), '-');
                        if (range.size() == 2) {
                            // We have a range.
                            is_range = true;
                            int left = atoi(range[0].c_str());
                            int right = atoi(range[1].c_str());
                            int delta = 1;
                            if (left > right)
                                delta = -1;
                            right += delta;
                            for (int k = left; k != right; k += delta)
                                sm[i].append(mask.substr(0, pos) + tostring(k) + mask.substr(pos2 + 1, mask.size() - pos2));
                        }
                    }
                }
                if (!is_range)
                    // There is no range.
                    sm[i].append(mask);
            }
        }
        TVec< TVec<string> > temp(2);
        int d = 0;
        if (statnames.isEmpty())
            PLERROR("In PTester::build_ - If you use 'statmask' then 'statnames' cannot "
                    "be empty (use statnames = [ \"\" ] if you want to specify all "
                    "statistics through statmask)");
        temp[d] = statnames_processed;
        for (int i=0;i<sm.length();i++) {
            temp[1-d].resize(temp[d].length() * sm[i].length());

            for (int j=0;j<sm[i].length();j++) {
                string mask = sm[i][j];
                size_t pos;
                if ((pos=mask.find('*'))==string::npos) {
                    // This may actually be useful, if we want to force a value.
                    for (int k = 0; k < temp[d].length(); k++) {
                        temp[1-d][j + k * sm[i].length()] = mask;
                    }
                } else {
                    for (int k=0;k<temp[d].length();k++) {
                        if (temp[d][k].find('*')!=string::npos) {
                            PLERROR("In PTester::build_ : elements of statnames cannot contain the '*' character");
                        }
                        string elem = mask;
                        elem.replace(pos,1,temp[d][k]);
                        temp[1-d][j + k * sm[i].length()] = elem;
                    }
                }
            }
            d = 1-d;
        }
        statnames_processed = temp[d];
    }

    // Check that each statname refers to a test set that actually exists
    // in the splits.
    if(splitter!=NULL){
        int nb_testset=splitter->nSetsPerSplit()-1;
        for(int i=0;i<statnames_processed.length();i++){
            int id = statnames_processed[i].find('[');
            // The character at id+5 is 'n' for "train", or the test-set
            // number for "test1", "test2", ...
            char c=statnames_processed[i][id+5];
            if(c=='n'){}
            else if(pl_islong(tostring(c)) && c>(nb_testset+'0'))
                PLWARNING("In PTester::build_() - the statname %s asks for"
                          " test set %c while there are only %d test sets.",
                          statnames_processed[i].c_str(),
                          c,nb_testset);
        }
    }

    save_mode_ = PStream::parseModeT(save_mode);
}

// ### Nothing to add here, simply calls build_
void PTester::build()
{
    inherited::build();
    build_();
}

// run //
void PTester::run()
{
    perform(call_forget_in_run);
}

// setExperimentDirectory //
void PTester::setExperimentDirectory(const PPath& the_expdir)
{
    expdir = the_expdir / "";
}

// perform1Split //
Vec PTester::perform1Split(int splitnum, bool call_forget)
{
    if (!learner)
        PLERROR("PTester::perform1Split : No learner specified for PTester.");
    if (!splitter)
        PLERROR("PTester::perform1Split : No splitter specified for PTester");

    const int nstats = statnames_processed.length();
    const int nsets = splitter->nSetsPerSplit();

    // Stats collectors for individual sets of a split:
    TVec< PP<VecStatsCollector> > stcol(nsets);

    for (int setnum = 0; setnum < nsets; setnum++)
    {
        if (template_stats_collector)
        {
            CopiesMap copies;
            stcol[setnum] = template_stats_collector->deepCopy(copies);
        }
        else
            stcol[setnum] = new VecStatsCollector();
    }


    // Stat specs
    TVec<StatSpec> statspecs(nstats);
    for(int k = 0; k < nstats; k++)
    {
        statspecs[k].init(statnames_processed[k]);
    }

    PPath splitdir;
    bool is_splitdir = false;
    if (!expdir.isEmpty())
    {
        splitdir = expdir / ("Split" + tostring(splitnum));
        is_splitdir = true;
    }

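    // dsets[0] is the training set; dsets[1], dsets[2], ... are the test
    // sets (dsets[1] is also passed to the learner as validation set).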
    TVec<VMat> dsets = splitter->getSplit(splitnum);

    TVec<string> testcostnames;

    if (should_train) {
        VMat trainset = dsets[0];
        if (is_splitdir && save_data_sets)
            PLearn::save(splitdir / "training_set.vmat", trainset);

        if (provide_learner_expdir)
        {
            if (is_splitdir)
                learner->setExperimentDirectory(splitdir / "LearnerExpdir/");
            else
                learner->setExperimentDirectory("");
        }

        learner->setTrainingSet(trainset, call_forget);

        testcostnames = learner->getTestCostNames();
        TVec<string> traincostnames = learner->getTrainCostNames();
        PP<VecStatsCollector> train_stats = stcol[0];
        train_stats->setFieldNames(traincostnames);
        train_stats->build();
        train_stats->forget();
        learner->setTrainStatsCollector(train_stats);


        if (need_to_save_test_names) {
            // Now that the learner has a training set, we can be sure the
            // cost names can be saved.
            saveStringInFile(expdir / "train_cost_names.txt", join(traincostnames, "\n") + "\n");
            saveStringInFile(expdir / "test_cost_names.txt", join(testcostnames, "\n") + "\n");
            need_to_save_test_names = false;
        }

        if (dsets.size() > 1)
            learner->setValidationSet(dsets[1]);

        if (is_splitdir && save_initial_learners)
            PLearn::save(splitdir / "initial_learner.psave", learner);

        train_stats->forget();
        learner->train();
        if(finalize_learner)
            learner->finalize();
        train_stats->finalize();

        if (is_splitdir)
        {
            if (save_stat_collectors)
                PLearn::save(splitdir / "train_stats.psave", train_stats);
            if (save_learners)
                PLearn::save(splitdir / "final_learner.psave", learner, save_mode_);
        }
    }
    else
        learner->build();

    // This needs to be after the setTrainingSet() / build() call to the
    // learner.
    const int outputsize = learner->outputsize();

    // perf_eval_costs[setnum][perf_evaluator_name][costname] will contain the value
    // of the given cost returned by the given perf_evaluator on the given setnum
    TVec< map<string, map<string, real> > > perf_eval_costs(dsets.length());

    if (testcostnames.isEmpty())
        testcostnames = learner->getTestCostNames();
    for (int setnum = 1; setnum < nsets; setnum++) {
        stcol[setnum]->setFieldNames(testcostnames);
        stcol[setnum]->build();
        stcol[setnum]->forget();
    }

    // Perform the test if required
    if (should_test)
    {
        for (int setnum = 1; setnum < dsets.length(); setnum++)
        {
            VMat testset = dsets[setnum];
            VMat test_outputs;
            VMat test_costs;
            VMat test_confidence;

            PP<VecStatsCollector> test_stats = stcol[setnum];
            const string setname = "test" + tostring(setnum);
            if (is_splitdir && save_data_sets)
                PLearn::save(splitdir / (setname + "_set.vmat"), testset);

            // QUESTION Why is this done so late? Can't it be moved
            // somewhere earlier? At least before the save_data_sets?
            if (is_splitdir)
                force_mkdir(splitdir);

            if (is_splitdir && save_test_outputs)
                test_outputs = new FileVMatrix(splitdir / (setname + "_outputs.pmat"),
                                               0, learner->getOutputNames());
            else if (!perf_evaluators.empty())
            {
                // We don't want to save test outputs to disk, but we
                // need them for perf_evaluators. So let's store them in
                // a MemoryVMatrix
                Mat data(testset.length(), outputsize);
                data.resize(0, outputsize);
                test_outputs = new MemoryVMatrix(data);
                test_outputs->declareFieldNames(learner->getOutputNames());
            }

            if (is_splitdir)
            {
                if (save_test_costs)
                    test_costs = new FileVMatrix(splitdir / (setname + "_costs.pmat"),
                                                 0, learner->getTestCostNames());
                if (save_test_confidence)
                    test_confidence = new FileVMatrix(splitdir / (setname + "_confidence.pmat"),
                                                      0, 2 * outputsize);
            }

            test_stats->forget();

            if (testset->length() == 0)
                PLWARNING("PTester:: test set %s is of length 0, costs will be set to -1",
                          setname.c_str());

            // Before each test set, reset the internal state of the learner
            learner->resetInternalState();

            learner->test(testset, test_stats, test_outputs, test_costs);
            //if (reset_stats)
            test_stats->finalize();
            if (is_splitdir && save_stat_collectors)
                PLearn::save(splitdir / (setname + "_stats.psave"), test_stats);

            perf_evaluators_t::iterator it = perf_evaluators.begin();
            const perf_evaluators_t::iterator itend = perf_evaluators.end();
            while (it != itend)
            {
                PPath perf_eval_dir;
                if (is_splitdir)
                    perf_eval_dir = splitdir / setname / ("perfeval_" + it->first);
                Vec perf_costvals = it->second->evaluatePerformance(learner, testset, test_outputs, perf_eval_dir);
                TVec<string> perf_costnames = it->second->getCostNames();
                if (perf_costvals.length()!=perf_costnames.length())
                    PLERROR("The vector of costs returned by a performance evaluator differs in size from its vector of costnames");
                map<string, real>& costmap = perf_eval_costs[setnum][it->first];
                for (int costi = 0; costi < perf_costnames.length(); costi++)
                    costmap[perf_costnames[costi]] = perf_costvals[costi];
                ++it;
            }
            computeConfidence(testset, test_confidence);
        }
    }

    Vec splitres(1 + nstats);
    splitres[0] = splitnum;

    for (int k = 0; k < nstats; k++)
    {
        // If we ask for a test-set that's beyond what's currently
        // available, OR we are asking for test-statistics in
        // train-only mode, then the statistic is MISSING_VALUE.
        StatSpec& sp = statspecs[k];
        if (sp.setnum>=stcol.length() ||
            (! should_test && sp.setnum > 0))
        {
            splitres[k+1] = MISSING_VALUE;
        }
        else
        {
            string left, right;
            split_on_first(sp.intstatname, ".",left,right);
            if (right != "" && perf_evaluators.find(left) != perf_evaluators.end())
            {
                // looks like a cost from a performance evaluator
                map<string, real>& costmap = perf_eval_costs[sp.setnum][left];
                if (costmap.find(right) == costmap.end())
                    PLERROR("No cost named %s appears to be returned by evaluator %s",
                            right.c_str(), left.c_str());
                splitres[k+1] = costmap[right];
            }
            else
                // must be a cost from a stats collector
                splitres[k+1] = stcol[sp.setnum]->getStat(sp.intstatname);
        }
    }

    return splitres;
}

// perform //
Vec PTester::perform(bool call_forget)
{
    if (!learner)
        PLERROR("No learner specified for PTester.");
    if (!splitter)
        PLERROR("No splitter specified for PTester");

    const int nstats = statnames_processed.length();
    Vec global_result(nstats);

    if (expdir != "")
    {
        if (pathexists(expdir) && enforce_clean_expdir)
            PLERROR("Directory (or file) %s already exists.\n"
                    "First move it out of the way.", expdir.c_str());
        if (!force_mkdir(expdir))
            PLERROR("In PTester - Could not create experiment directory %s", expdir.c_str());
        expdir = expdir.absolute() / "";

        // Save this tester description in the expdir
        if (save_initial_tester)
            PLearn::save(expdir / "tester.psave", *this);
    }

    if(redirect_stdout && ! expdir.isEmpty()){
        pout.flush();
        pout=openFile(expdir/"stdout",PStream::raw_ascii,"w");
    }
    if(redirect_stderr && ! expdir.isEmpty()){
        perr.flush();
        perr=openFile(expdir/"stderr",PStream::raw_ascii,"w");
    }

    splitter->setDataSet(dataset);

    const int nsplits = splitter->nsplits();
    if (nsplits > 1)
        call_forget = true;

    // Global stats collector
    PP<VecStatsCollector> global_statscol;
    if (global_template_stats_collector)
    {
        CopiesMap copies;
        global_statscol = global_template_stats_collector->deepCopy(copies);
        global_statscol->build();
        global_statscol->forget();
    }
    else
        global_statscol = new VecStatsCollector();

    // Stat specs
    TVec<StatSpec> statspecs(nstats);
    for(int k = 0; k < nstats; k++)
    {
        statspecs[k].init(statnames_processed[k]);
    }

    // No ACC stats for parallel perform
    for (int k = 0; k < nstats; k++)
        if (statspecs[k].extstat == "ACC")
            PLERROR("ACC stats not supported anymore; please adapt PTester::perform to your needs.");


    // The vmat in which to save global result stats specified in statnames
    VMat global_stats_vm;
    // The vmat in which to save per split result stats
    VMat split_stats_vm;

    need_to_save_test_names = false; // Reset to default 'false' value.
    if (!expdir.isEmpty() && report_stats)
    {
        need_to_save_test_names = save_test_names;
        global_stats_vm = new FileVMatrix(expdir / "global_stats.pmat",
                                          1, nstats);
        for (int k = 0; k < nstats; k++)
            global_stats_vm->declareField(k, statspecs[k].statName());
        global_stats_vm->saveFieldInfos();

        if(save_split_stats){
            split_stats_vm = new FileVMatrix(expdir / "split_stats.pmat",
                                             nsplits, 1 + nstats);
            split_stats_vm->declareField(0, "splitnum");
            for (int k = 0; k < nstats; k++)
                split_stats_vm->declareField(k+1, statspecs[k].setname + "." + statspecs[k].intstatname);
            split_stats_vm->saveFieldInfos();
        }
    }

    PLearnService& service(PLearnService::instance());
    int nservers= min(nsplits, service.availableServers());

    if(nservers > 1 && parallelize_here && (!should_train || call_forget))
    {
        TVec<PP<RemotePLearnServer> > servers= service.reserveServers(nsplits);
        map<PP<RemotePLearnServer>, int> testers_ids;
        map<PP<RemotePLearnServer>, int> splitnums;
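        // Farm the splits out to remote servers: the first result received
        // from each server is the id of the remote PTester it constructed;
        // every subsequent result is the stats vector of a completed split.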
        for (int splitnum= 0; splitnum < nservers && splitnum < nsplits; ++splitnum)
            servers[splitnum]->newObjectAsync(*this);

        int splits_called= 0;
        //int testers_created= nservers;
        for (int splits_done= 0; nservers > 0;)//splits_done < nsplits;)
        {
            PP<RemotePLearnServer> s= service.waitForResult();
            if(testers_ids.find(s) == testers_ids.end())
            {
                if(splits_called < nsplits)
                {
                    int id;
                    s->getResults(id);
                    testers_ids[s]= id;
                    s->callMethod(id, "perform1Split", splits_called, call_forget);
                    splitnums[s]= splits_called;
                    ++splits_called;
                }
                else
                {
                    s->getResults(); // tester deleted
                    service.freeServer(s);
                    --nservers;
                }
            }
            else // get split result
            {
                Vec splitres;
                s->getResults(splitres);
                ++splits_done;
                if (split_stats_vm)
                {
                    split_stats_vm->putRow(splitnums[s],splitres);
                    split_stats_vm->flush();
                }

                global_statscol->update(splitres.subVec(1, nstats));

                if(splits_called < nsplits)//call for another split
                {
                    s->callMethod(testers_ids[s], "perform1Split", splits_called, call_forget);
                    splitnums[s]= splits_called;
                    ++splits_called;
                }
                else
                {
                    s->deleteObjectAsync(testers_ids[s]);
                    testers_ids.erase(s);
                }
            }
        }
    }
    else
        for (int splitnum= 0; splitnum < nsplits; ++splitnum)
        {
            Vec splitres= perform1Split(splitnum, call_forget);

            if (split_stats_vm)
            {
                split_stats_vm->putRow(splitnum, splitres);
                split_stats_vm->flush();
            }

            global_statscol->update(splitres.subVec(1, nstats));
        }


    global_statscol->finalize();
    for (int k = 0; k < nstats; k++)
        global_result[k] = global_statscol->getStats(k).getStat(statspecs[k].extstat);

    if (global_stats_vm)
        global_stats_vm->appendRow(global_result);

#if USING_MPI
    if (PLMPI::rank == 0)
#endif
    // Perform the final commands provided in final_commands.
    for (int i = 0; i < final_commands.length(); i++)
    {
        system(final_commands[i].c_str());
    }

    return global_result;
}

void PTester::computeConfidence(VMat test_set, VMat confidence)
{
    PLASSERT(learner);
    if (!confidence)
        return;
    PP<ProgressBar> pb;
    const int n = test_set.length();
    if (learner->report_progress)
        pb = new ProgressBar("Computing Confidence Intervals", n);
    Vec input, target, output(learner->outputsize());
    TVec< pair<real,real> > intervals;
    Vec intervals_real;
    real weight;
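    // Each row written to 'confidence' is [ low_0, high_0, low_1, high_1, ... ]:
    // the bounds of the 95% confidence interval for each output dimension.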
    for (int i=0 ; i<n ; ++i) {
        if (pb)
            pb->update(i);
        test_set.getExample(i, input, target, weight);
        learner->computeOutput(input,output);
        learner->computeConfidenceFromOutput(input,output,0.95,intervals);
        intervals_real.resize(2*intervals.size());
        for (int j=0 ; j<intervals.size() ; ++j) {
            intervals_real[2*j] = intervals[j].first;
            intervals_real[2*j+1] = intervals[j].second;
        }
        confidence->putOrAppendRow(i,intervals_real);
    }
}

// setStatNames //
void PTester::setStatNames(const TVec<string>& the_statnames,
                           bool call_build)
{
    statnames.resize(the_statnames.length());
    statnames << the_statnames;
    if (call_build)
        build();
}

// getStatNames //
TVec<string> PTester::getStatNames()
{
    return statnames_processed;
}


//#####  StatSpec  #########################################################

void StatSpec::init(const string& statname)
{
    parseStatname(statname);
}

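// Parses a statname such as "E[test1.E[mse]]" into extstat="E",
// setname="test1", intstatname="E[mse]" and setnum=1.  The old format
// "E[E[test1.mse]]" is still accepted for backward compatibility.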
void StatSpec::parseStatname(const string& statname)
{
    PStream in = openString(statname, PStream::plearn_ascii);
    if(in.smartReadUntilNext("[", extstat)==EOF)
        PLERROR("No opening bracket found in statname %s", statname.c_str());
    string token;
    int nextsep = in.smartReadUntilNext(".[",token);
    if(nextsep==EOF)
        PLERROR("Expected dataset.xxxSTATxxx after the opening bracket. Got %s", token.c_str());
    else if(nextsep=='[') // Old format (for backward compatibility) ex: E[E[train.mse]]
    {
        PLWARNING("In StatSpec::parseStatname - You are still using the old statnames format, please use the new one!");
        // TODO Remove the old format some day?
        intstatname = token;
        if(in.smartReadUntilNext(".",setname)==EOF)
            PLERROR("Error while parsing statname: expected a dot");
        string costname;
        if(in.smartReadUntilNext("]",costname)==EOF)
            PLERROR("Error while parsing statname: expected a closing bracket");
        intstatname = intstatname+"["+costname+"]";
    }
    else // We've read a dot. That's the new format E[train.E[mse]]
    {
        setname = token;
        if(in.smartReadUntilNext("]",intstatname)==EOF)
            PLERROR("Error while parsing statname: expected a closing bracket");
    }

    if(setname=="train")
        setnum = 0;
    else if(setname=="test")
        setnum = 1;
    else if(setname.substr(0,4)=="test")
    {
        setnum = toint(setname.substr(4));
        if(setnum==0)
            PLERROR("In parseStatname: use the name train instead of test0.\n"
                    "The first set of a split is the training set. The following are test sets named test1 test2 ...");
        if(setnum<=0)
            PLERROR("In parseStatname: parse error for %s",statname.c_str());
    }
    else
        PLERROR("In parseStatname: parse error for %s",statname.c_str());
}

// makeDeepCopyFromShallowCopy //
void PTester::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(statnames, copies);
    deepCopyField(statnames_processed, copies);
    deepCopyField(dataset, copies);
    deepCopyField(final_commands, copies);
    deepCopyField(global_template_stats_collector, copies);
    deepCopyField(learner, copies);
    deepCopyField(splitter, copies);
    deepCopyField(statmask, copies);
    deepCopyField(template_stats_collector, copies);
    deepCopyField(perf_evaluators, copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :