// -*- C++ -*-

// Learner.cc
//
// Copyright (C) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio, Nicolas Chapados, Charles Dugas, Rejean Ducharme, Universite de Montreal
// Copyright (C) 2001,2002 Francis Pieraut, Jean-Sebastien Senecal
// Copyright (C) 2002 Frederic Morin, Xavier Saint-Mleux, Julien Keable
// 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: Learner.cc 4270 2005-10-19 20:04:37Z ducharme $
 ******************************************************* */

#include "Learner.h"
#include <plearn/io/TmpFilenames.h>
#include <plearn/io/load_and_save.h>
#include <plearn/base/stringutils.h>
#include <plearn/io/MPIStream.h>
#include <plearn/vmat/FileVMatrix.h>
#include <plearn/vmat/RemoveRowsVMatrix.h>
#include <plearn/sys/PLMPI.h>
#include <plearn/io/StdPStreamBuf.h>

namespace PLearn {
using namespace std;

Vec Learner::tmp_input; // temporary input vec
Vec Learner::tmp_target; // temporary target vec
Vec Learner::tmp_weight; // temporary weight vec
Vec Learner::tmp_output; // temporary output vec
Vec Learner::tmp_costs; // temporary costs vec

PStream& /*oassignstream&*/ Learner::default_vlog()
{
    //  static oassignstream default_vlog = cout;
    static PStream default_vlog(&cout);
    default_vlog.outmode=PStream::raw_ascii;
    return default_vlog;
}
int Learner::use_file_if_bigger = 64000000L;
bool Learner::force_saving_on_all_processes = false;

Learner::Learner(int the_inputsize, int the_targetsize, int the_outputsize)
    :train_objective_stream(0), epoch_(0), distributed_(false),
     inputsize_(the_inputsize), targetsize_(the_targetsize), outputsize_(the_outputsize), 
     weightsize_(0), dont_parallelize(false), save_at_every_epoch(false), save_objective(true), best_step(0)
{
    test_every = 1;
    minibatch_size = 1; // by default call use, not apply
    setEarlyStopping(-1, 0, 0); // No early stopping by default
    vlog = default_vlog();
    report_test_progress_every = 10000;
    measure_cpu_time_first=false;
    setTestStatistics(mean_stats() & stderr_stats());
}

PLEARN_IMPLEMENT_ABSTRACT_OBJECT(Learner, "DEPRECATED CLASS: Derive from PLearner instead", "NO HELP");
void Learner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    Object::makeDeepCopyFromShallowCopy(copies);
    //Measurer::makeDeepCopyFromShallowCopy(copies);
    //deepCopyField(test_sets, copies);
    //deepCopyField(measurers, copies);
    deepCopyField(avg_objective, copies);
    deepCopyField(avgsq_objective, copies);
    deepCopyField(test_costfuncs, copies);
    deepCopyField(test_statistics, copies);
}

void Learner::outputResultLineToFile(const string & fname, const Vec& results,bool append,const string& names)
{
#if __GNUC__ < 3
    ofstream teststream(fname.c_str(),ios::out|(append?ios::app:0));
#else
    ofstream teststream(fname.c_str(),ios_base::out|(append?ios_base::app:static_cast<ios::openmode>(0)));
#endif
    // norman: added WIN32 check
#if __GNUC__ < 3 && !defined(WIN32)
    if(teststream.tellp()==0)
#else
        if(teststream.tellp() == streampos(0))
#endif
            teststream << "#: epoch " << names << endl;
    teststream << setw(5) << epoch_ << "  " << results << endl;
}
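// Illustrative example of the file produced above (cost names and all numbers
// are made up; the header line is written only when the file is empty):
//
//   #: epoch mse.mean mse.stderr
//       0  0.7523 0.0142
//       1  0.6918 0.0139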

string Learner::basename() const
{
    if(!experiment_name.empty())
    {
        PLWARNING("** Warning: the experiment_name system is DEPRECATED, please use the expdir system from now on, through setExperimentDirectory, and don't set an experiment_name. For now I'll be using the specified experiment_name=%s as the default basename for your results, but this won't be supported in the future",experiment_name.c_str());
        return experiment_name;
    }
    else if(expdir.empty())
    {
        PLERROR("Problem in Learner: Please call setExperimentDirectory for your learner prior to calling a train/test");
    }
    else if(!train_set)
    {
        PLWARNING("You should call setTrainingSet at the beginning of the train method in class %s ... Using 'unknown' as alias for now...", classname().c_str());
        return expdir + "unknown";
    }
    /* Aliases are now removed.
       else if(train_set->getAlias().empty())
       {
       //PLWARNING("The training set has no alias defined for it (you could call setAlias(...)) Using 'unknown' as alias");
       return expdir + "unknown";
       }
       return expdir+train_set->getAlias();
    */
    PLERROR("In Learner::basename - The alias system is now out-of-order, update your code !");
    return "";
}


void Learner::declareOptions(OptionList& ol)
{
    declareOption(ol, "inputsize", &Learner::inputsize_, OptionBase::buildoption, 
                  "dimensionality of input vector \n");

    declareOption(ol, "outputsize", &Learner::outputsize_, OptionBase::buildoption, 
                  "dimensionality of output \n");

    declareOption(ol, "targetsize", &Learner::targetsize_, OptionBase::buildoption, 
                  "dimensionality of target \n");

    declareOption(ol, "weightsize", &Learner::weightsize_, OptionBase::buildoption, 
                  "Number of weights within target.  The last 'weightsize' fields of the target vector will be used as cost weights.\n"
                  "This is usually 0 (no weight) or 1 (1 weight per sample). Special loss functions may be able to give a meaning\n"
                  "to weightsize>1. Not all learners support weights.");

    declareOption(ol, "dont_parallelize", &Learner::dont_parallelize, OptionBase::buildoption, 
                  "By default, MPI parallelization done at a given level prevents further parallelization\n"
                  "at levels further down. If true, this means *don't parallelize processing at this level*");

    declareOption(ol, "earlystop_testsetnum", &Learner::earlystop_testsetnum, OptionBase::buildoption, 
                  "    index of test set (in test_sets) to use for early \n"
                  "    stopping (-1 means no early-stopping) \n");

    declareOption(ol, "earlystop_testresultindex", &Learner::earlystop_testresultindex, OptionBase::buildoption, 
                  "    index of statistic (as returned by test) to use\n");

    declareOption(ol, "earlystop_max_degradation", &Learner::earlystop_max_degradation, OptionBase::buildoption, 
                  "    maximum degradation in error from last best value\n");

    declareOption(ol, "earlystop_min_value", &Learner::earlystop_min_value, OptionBase::buildoption, 
                  "    minimum error beyond which we stop\n");

    declareOption(ol, "earlystop_min_improvement", &Learner::earlystop_min_improvement, OptionBase::buildoption, 
                  "    minimum improvement in error otherwise we stop\n");

    declareOption(ol, "earlystop_relative_changes", &Learner::earlystop_relative_changes, OptionBase::buildoption, 
                  "    are max_degradation and min_improvement relative?\n");

    declareOption(ol, "earlystop_save_best", &Learner::earlystop_save_best, OptionBase::buildoption, 
                  "    if yes, then return with saved 'best' model\n");

    declareOption(ol, "earlystop_max_degraded_steps", &Learner::earlystop_max_degraded_steps, OptionBase::buildoption, 
                  "    max. nb of steps beyond best found (-1 means ignore) \n");

    declareOption(ol, "save_at_every_epoch", &Learner::save_at_every_epoch, OptionBase::buildoption, 
                  "    save learner at each epoch?\n");

    declareOption(ol, "save_objective", &Learner::save_objective, OptionBase::buildoption, 
                  "    save objective at each epoch?\n");

    declareOption(ol, "expdir", &Learner::expdir, OptionBase::buildoption,
                  "   The directory in which to save results \n");

    declareOption(ol, "test_costfuncs", &Learner::test_costfuncs, OptionBase::buildoption,
                  "   The cost functions used by the default useAndCost method \n");

    declareOption(ol, "test_statistics", &Learner::test_statistics, OptionBase::buildoption,
                  "   The test statistics used by the default test method \n",
                  "mean_stats() & stderr_stats()");

    declareOption(ol, "test_every", &Learner::test_every, OptionBase::buildoption, 
                  "   Compute cost on the test set every <test_every> steps (if 0, then no test is done during training)\n");

    declareOption(ol, "minibatch_size", &Learner::minibatch_size, 
                  OptionBase::buildoption, 
                  "   size of blocks over which to perform tests, calling 'apply' if >1, otherwise calling 'use'\n");

    inherited::declareOptions(ol);
}
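// Hypothetical illustration of how a few of the build options declared above
// might appear in a PLearn script/serialization file for some concrete
// subclass (the subclass name "MyLearner" and all values are made up):
//
//   MyLearner(
//       inputsize = 10;
//       targetsize = 1;
//       weightsize = 0;
//       earlystop_testsetnum = 0;
//       earlystop_max_degradation = 0.1;
//       save_at_every_epoch = 1;
//   )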


void Learner::setExperimentDirectory(const PPath& the_expdir) 
{ 
#if USING_MPI
    if(PLMPI::rank==0) {
#endif
        if(!force_mkdir(the_expdir))
        {
            PLERROR("In Learner::setExperimentDirectory Could not create experiment directory %s",
                    the_expdir.c_str());
        }
#if USING_MPI
    }
#endif
    expdir = the_expdir.absolute();
}

void Learner::build_()
{
    // Early stopping initialisation
    earlystop_previousval = FLT_MAX;
    earlystop_minval = FLT_MAX;
}

void Learner::build()
{
    inherited::build();
    build_();
}

void Learner::forget()
{
    // Early stopping parameters initialisation
    earlystop_previousval = FLT_MAX;
    earlystop_minval = FLT_MAX;
    epoch_ = 0;
}

void Learner::useAndCostOnTestVec(const VMat& test_set, int i, const Vec& output, const Vec& cost)
{
    tmpvec.resize(test_set.width());
    if (minibatch_size > 1)
    {
        Vec inputvec(inputsize()*minibatch_size);
        Vec targetvec(targetsize()*minibatch_size);
        for (int k=0; k<minibatch_size;k++)
        {
            test_set->getRow(i+k,tmpvec);
            for (int j=0; j<inputsize(); j++)
                inputvec[k*inputsize()+j] = tmpvec[j];
            for (int j=0; j<targetsize(); j++)
                targetvec[k*targetsize()+j] = tmpvec[inputsize()+j];
        }
        useAndCost(inputvec, targetvec, output, cost);
    }
    else
    {
        test_set->getRow(i,tmpvec);
        useAndCost(tmpvec.subVec(0,inputsize()), tmpvec.subVec(inputsize(),targetsize()), output, cost);
    }
}
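// Illustrative layout of the minibatch flattening done above, for
// minibatch_size == 2, inputsize() == 3, targetsize() == 1:
//   test_set row i   : [ x0  x1  x2  t  ]
//   test_set row i+1 : [ x0' x1' x2' t' ]
//   inputvec  = [ x0 x1 x2 x0' x1' x2' ]
//   targetvec = [ t t' ]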

void Learner::useAndCost(const Vec& input, const Vec& target, Vec output, Vec cost)
{
    use(input,output);
    computeCost(input, target, output, cost);
}

void Learner::computeCost(const Vec& input, const Vec& target, const Vec& output, const Vec& cost)
{
    for (int k=0; k<test_costfuncs.size(); k++)
        cost[k] = test_costfuncs[k](output, target);
}

void Learner::setTestDuringTrain(ostream& out, int every, Array<VMat> testsets)
{
    // testout(&out);//testout = out;
    testout = new StdPStreamBuf(&out);
    test_every = every;
    test_sets = testsets;
}

void Learner::openTrainObjectiveStream()
{
    string filename = expdir.empty() ? string("/dev/null") : expdir+"train.objective";
    if(train_objective_stream)
        delete train_objective_stream;
    train_objective_stream = new ofstream(filename.c_str(),ios::out|ios::app);
    ostream& out = *train_objective_stream;
    if(out.bad())
        PLERROR("could not open file %s for appending",filename.c_str());
    // norman: added WIN32 check
#if __GNUC__ < 3 && !defined(WIN32)
    if(out.tellp()==0)
#else
        if(out.tellp() == streampos(0))
#endif
            out << "#  epoch | " << join(trainObjectiveNames()," | ") << endl;
}

ostream& Learner::getTrainObjectiveStream()
{
    if(!train_objective_stream)
        openTrainObjectiveStream();
    return *train_objective_stream;
}

void Learner::openTestResultsStreams()
{
    freeTestResultsStreams();
    int n = test_sets.size();
    test_results_streams.resize(n);
    for(int k=0; k<n; k++)
    {
        PLERROR("In Learner::openTestResultsStreams - Come on, do not use this class anymore, aliases are out-of-order");
        string filename = ""; // Dummy string to make the compiler happy.
        /*
          string alias = test_sets[k]->getAlias();
          // if(alias.empty())
          //   PLERROR("In Learner::openTestResultsStreams testset #%d has no defined alias",k);
          string filename = alias.empty() ? string("/dev/null") : expdir+alias+".results";
        */
        test_results_streams[k] = new ofstream(filename.c_str(), ios::out|ios::app);
        ostream& out = *test_results_streams[k];
        if(out.bad())
            PLERROR("In Learner::openTestResultsStreams could not open file %s for appending",filename.c_str());
        // norman: added WIN32 check
#if __GNUC__ < 3 && !defined(WIN32)
        if(out.tellp() == 0)
#else
            if(out.tellp() == streampos(0))
#endif
                out << "#: epoch " << join(testResultsNames()," ") << endl;
    }
}

void Learner::freeTestResultsStreams()
{
    int n = test_results_streams.size();
    for(int k=0; k<n; k++)
        delete test_results_streams[k];
    test_results_streams.resize(0);
}

// There are as many test results streams as there are test sets.
ostream& Learner::getTestResultsStream(int k) 
{ 
    if(test_results_streams.size()==0)
        openTestResultsStreams();
    return *test_results_streams[k]; 
}


void Learner::setTestDuringTrain(Array<VMat> testsets)
{  test_sets = testsets; }

Learner::~Learner()
{
    if(train_objective_stream)
        delete train_objective_stream;
    freeTestResultsStreams();
}

// which_testset and which_testresult select the appropriate testset and
// costfunction to base early-stopping on, from those that were specified
// in setTestDuringTrain.
// * degradation is the difference between the current value and the
//   smallest value ever attained; training will be stopped if it grows
//   beyond max_degradation
// * training will be stopped if the current value goes below min_value
// * training will be stopped if the difference between the previous value
//   and the current value is below min_improvement
void Learner::setEarlyStopping(int which_testset, int which_testresult, 
                               real max_degradation, real min_value, 
                               real min_improvement, bool relative_changes,
                               bool save_best, int max_degraded_steps)
{
    earlystop_testsetnum = which_testset;
    earlystop_testresultindex = which_testresult;
    earlystop_max_degradation = max_degradation;
    earlystop_min_value = min_value;
    earlystop_previousval = FLT_MAX;
    earlystop_minval = FLT_MAX;
    earlystop_relative_changes = relative_changes;
    earlystop_min_improvement = min_improvement;
    earlystop_save_best = save_best;
    earlystop_max_degraded_steps = max_degraded_steps;
}
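// Hypothetical usage sketch (all values illustrative, not prescriptive):
// monitor the first statistic of the first test set, stop on a 10% relative
// degradation from the best value seen, or on a relative improvement below
// 0.01%, or after 20 steps without a new best, reloading the best saved
// model when stopping:
//   learner->setEarlyStopping(0, 0, 0.1, 0.0, 0.0001, true, true, 20);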

bool Learner::measure(int step, const Vec& costs)
{
    earlystop_min_value /= minibatch_size;
    if (costs.length()<1)
        PLERROR("Learner::measure: costs.length_=%d should be >0", costs.length());

    //vlog << ">>> Now measuring for step " << step << " (costs = " << costs << " )" << endl; 

    //  if (objectiveout)
    //  objectiveout << setw(5) << step << "  " << costs << "\n";

    if (((!PLMPI::synchronized && each_cpu_saves_its_errors) || PLMPI::rank==0) && save_objective)
        outputResultLineToFile(basename()+".objective",costs,true,join(trainObjectiveNames()," "));

    bool muststop = false;

    if (((!PLMPI::synchronized && each_cpu_saves_its_errors) || PLMPI::rank==0) && save_at_every_epoch)
    {
        string fname  = basename()+".epoch"+tostring(epoch())+".psave";
        vlog << " >> Saving model in " << fname << endl;
        PLearn::save(fname, *this);
    }
    if ((test_every != 0) && (step%test_every==0))
    {
        int ntestsets = test_sets.size();
        Array<Vec> test_results(ntestsets);
        for (int n=0; n<ntestsets; n++) // looping over test sets
        {
            test_results[n] = test(test_sets[n]);
            if ((!PLMPI::synchronized && each_cpu_saves_its_errors) || PLMPI::rank==0)
                PLERROR("In Learner::measure - Aliases are gone, so am I !");
            // outputResultLineToFile(basename()+"."+test_sets[n]->getAlias()+".hist.results",test_results[n],true,
            //                           join(testResultsNames()," "));
        }

        if (ntestsets>0 && earlystop_testsetnum>=0) // are we doing early stopping?
        {
            real earlystop_currentval = 
                test_results[earlystop_testsetnum][earlystop_testresultindex];
            //  cout << earlystop_currentval << " " << earlystop_testsetnum << " " << earlystop_testresultindex << endl;
            // Check if early-stopping condition was met
            if ((earlystop_relative_changes &&
                 ((earlystop_currentval-earlystop_minval > 
                   earlystop_max_degradation * abs(earlystop_minval))
                  || (earlystop_currentval < earlystop_min_value)
                  || (earlystop_previousval-earlystop_currentval < 
                      earlystop_min_improvement * abs(earlystop_previousval)))) ||
                (!earlystop_relative_changes &&
                 ((earlystop_currentval-earlystop_minval > earlystop_max_degradation)
                  || (earlystop_currentval < earlystop_min_value)
                  || (earlystop_previousval-earlystop_currentval < 
                      earlystop_min_improvement))) ||
                (earlystop_max_degraded_steps>=0 &&
                 (step-best_step>=earlystop_max_degraded_steps) && 
                 (earlystop_minval < FLT_MAX)))
            { // earlystopping met
                if (earlystop_save_best)
                {
                    string fname  = basename()+".psave";
                    vlog << "Met early-stopping condition!" << endl;
                    vlog << "earlystop_currentval = " << earlystop_currentval << endl;
                    vlog << "earlystop_minval = " << earlystop_minval << endl;
                    vlog << "threshold = " << earlystop_max_degradation*earlystop_minval << endl;
                    vlog << "STOPPING (reloading best model)" << endl;
                    if(expdir.empty()) // old deprecated mode
                        load();
                    else
                        PLearn::load(fname,*this);
                }
                else
                    cout << "Result for benchmark is: " << test_results << endl;
                muststop = true;
            }
            else // earlystopping not met
            {
                earlystop_previousval = earlystop_currentval;
                if (PLMPI::rank==0 && earlystop_save_best
                    && (earlystop_currentval < earlystop_minval))
                {
                    string fname  = basename()+".psave";
                    vlog << "saving model in " << fname <<  " because of earlystopping / improvement: " << endl;
                    vlog << "earlystop_currentval = " << earlystop_currentval << endl;
                    vlog << "earlystop_minval = " << earlystop_minval << endl;
                    PLearn::save(fname,*this);
                    // update .results file
                    if ((!PLMPI::synchronized && each_cpu_saves_its_errors) || PLMPI::rank==0)
                        PLERROR("In Learner::measure - Aliases are gone, so am I !");
                    /*
                      for (int n=0; n<ntestsets; n++) // looping over test sets
                      outputResultLineToFile(basename()+"."+test_sets[n]->getAlias()+".results",test_results[n],false,
                      join(testResultsNames()," "));
                    */
                    cout << "Result for benchmark is: " << test_results << endl;
                }
            }
            if (earlystop_currentval < earlystop_minval)
            {
                earlystop_minval = earlystop_currentval;
                best_step = step;
                if(PLMPI::rank==0)
                    vlog << "currently best step at " << best_step << " with " << earlystop_currentval << " " << test_results << endl;
            }
        } 
        else
            // save tests in .results
            if ((!PLMPI::synchronized && each_cpu_saves_its_errors) || PLMPI::rank==0)
                PLERROR("In Learner::measure - Aliases are gone, so am I !");
        /*
          for (int n=0; n<ntestsets; n++) // looping over test sets
          outputResultLineToFile(basename()+"."+test_sets[n]->getAlias()+".results",test_results[n],false,
          join(testResultsNames()," "));
        */
    }

    for (int i=0; i<measurers.size(); i++)
        muststop = muststop || measurers[i]->measure(step,costs);

    ++epoch_;

// BUG: This doesn't work as intended in certain cases (i.e. me!)
//#if USING_MPI
//MPI_Barrier(MPI_COMM_WORLD);
//#endif

    return muststop;
}
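// Worked example of the relative early-stopping test above (illustrative
// numbers): with earlystop_max_degradation = 0.1 and earlystop_minval = 0.50,
// training stops once earlystop_currentval - 0.50 > 0.1 * |0.50| = 0.05,
// i.e. as soon as the monitored cost exceeds 0.55.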

// Call the 'use' method many times on the first inputsize() elements of
// each row of a 'data' VMat, and put the
// machine's 'outputs' in a writable VMat (e.g. maybe a file, or a matrix).
void Learner::apply(const VMat& data, VMat outputs)
{
    int n=data.length();
    Vec data_row(data.width());
    Vec input = data_row.subVec(0,inputsize());
    Vec output(outputsize());
    for (int i=0;i<n;i++)
    {
        data->getRow(i,data_row); // also gets input_row and target
        use(input,output);
        outputs->putRow(i,output);
    }
}
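// Hedged usage sketch (assumes a trained concrete learner; a Mat wrapped as
// a VMat is one possible writable destination, following the same pattern
// used in test() below):
//   VMat outputs = Mat(data.length(), learner->outputsize());
//   learner->apply(data, outputs);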

// This method calls useAndCost repetitively on all the rows of data,
// throwing away the resulting output vectors but putting all the cost vectors
// in the costs VMat.
void Learner::computeCosts(const VMat& data, VMat costs)
{
    int n=data.length();
    int ncostfuncs = costsize();
    Vec output_row(outputsize());
    Vec cost(ncostfuncs);
    cout << ncostfuncs << endl;
    for (int i=0;i*minibatch_size<n;i++)
    {
        useAndCostOnTestVec(data, i*minibatch_size, output_row, cost);
        costs->putRow(i,cost); // save the costs
    }
}

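// Leave-one-out estimation: for each row i, retrain the learner on the
// dataset with row i removed, then evaluate the cost on the held-out row.
// Note that this calls train() once per row, which can be very expensive.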
void Learner::computeLeaveOneOutCosts(const VMat& data, VMat costsmat)
{
    // Vec testsample(inputsize()+targetsize());
    // Vec testinput = testsample.subVec(0,inputsize());
    // Vec testtarget = testsample.subVec(inputsize(),targetsize());
    Vec output(outputsize());
    Vec cost(costsize());
    // VMat subset;
    for(int i=0; i<data.length(); i++)
    {
        // data->getRow(i,testsample);
        train(removeRow(data,i));
        useAndCostOnTestVec(data, i, output, cost);
        // useAndCost(testinput,testtarget,output,cost);
        costsmat->putRow(i,cost);
        vlog << '.' << flush;
        if(i%100==0)
            vlog << '\n' << i << flush;
    }
}

void Learner::computeLeaveOneOutCosts(const VMat& data, VMat costsmat, CostFunc costf)
{
    // norman: added parentheses to clarify precedence
    if( (costsmat.length() != data.length()) || (costsmat.width()!=1))
        PLERROR("In Learner::computeLeaveOneOutCosts bad dimensions for costsmat VMat");
    Vec testsample(inputsize()+targetsize());
    Vec testinput = testsample.subVec(0,inputsize());
    Vec testtarget = testsample.subVec(inputsize(),targetsize());
    Vec output(outputsize());
    VMat subset;
    for(int i=0; i<data.length(); i++)
    {
        data->getRow(i,testsample);
        train(removeRow(data,i));
        use(testinput,output);
        costsmat->put(i,0,costf(output,testtarget));
        vlog << '.' << flush;
        if(i%100==0)
            vlog << '\n' << i << flush;
    }
}
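// Hedged usage sketch for the CostFunc variant above (the names 'my_costf'
// and 'data' are illustrative assumptions; the width-1 cost matrix matches
// the dimension check at the top of the function):
//   VMat loo_costs = Mat(data.length(), 1);
//   learner->computeLeaveOneOutCosts(data, loo_costs, my_costf);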

// This method calls useAndCost repetitively on all the rows of data,
// putting all the resulting output and cost vectors in the outputs and
// costs VMat's.
void Learner::applyAndComputeCosts(const VMat& data, VMat outputs, VMat costs)
{
    int n=data.length();
    int ncostfuncs = costsize();
    Vec output_row(outputsize()*minibatch_size);
    Vec costs_row(ncostfuncs);
    for (int i=0;i*minibatch_size<n;i++)
    {
        // data->getRow(i,data_row); // also gets input_row and target
        useAndCostOnTestVec(data, i*minibatch_size, output_row, costs_row);
        // useAndCostOnTestVec(data, i, output_row, costs_row);
        // useAndCost(input_row,target,output_row,costs_row); // does the work
        //outputs->putRow(i,output_row); // save the outputs
        for (int k=0; k<minibatch_size; k++)
        {
            outputs->putRow(i+k,output_row.subVec(k*outputsize(),outputsize())); // save the outputs
        }
        costs->putRow(i,costs_row); // save the costs
    }
}

Vec Learner::computeTestStatistics(const VMat& costs)
{
    return concat(test_statistics.computeStats(costs));
}


// [PASCAL TODO:] 
// 1) Handle weights properly
// 2) Fix parallel code to use MPIStream for more efficient buffering (and check Yoshua's problem)
// 4) let save parameters be VMatrix (on which to call append)


Vec Learner::test(VMat test_set, const string& save_test_outputs, const string& save_test_costs)
{
    int ncostfuncs = costsize();

    Vec output(outputsize()*minibatch_size);
    Vec cost(ncostfuncs);
    Mat output_block(minibatch_size,outputsize());
    Mat cost_block(minibatch_size,outputsize());
    if (minibatch_size>1)
        cost_block.resize(minibatch_size,costsize());

    Vec result;

    VMat outputs; // possibly where to save outputs (and target)
    VMat costs; // possibly where to save costs
    if(PLMPI::rank==0 && !save_test_outputs.empty())
        outputs = new FileVMatrix(save_test_outputs, test_set.length(), outputsize());

    if(PLMPI::rank==0 && !save_test_costs.empty())
        costs = new FileVMatrix(save_test_costs, test_set.length(), ncostfuncs);

    int l = test_set.length();
    ProgressBar progbar(vlog, "Testing this old deprecated Learner you should not be using anymore", l);
    //  + test_set->getAlias(), l); // Aliases are deprecated.
    // ProgressBar progbar(cerr, "Testing " + test_set->getAlias(), l);
    // ProgressBar progbar(nullout(), "Testing " + test_set->getAlias(), l);

    // Do the test statistics require multiple passes?
    bool multipass = test_statistics.requiresMultiplePasses(); 

    // If multiple passes are required, make sure we save the individual costs in an appropriate 'costs' VMat
    if (PLMPI::rank==0 && save_test_costs.empty() && multipass)
    {
        TmpFilenames tmpfile(1);
        bool save_on_file = ncostfuncs*test_set.length() > use_file_if_bigger;
        if (save_on_file)
            costs = new FileVMatrix(tmpfile.addFilename(),test_set.length(),ncostfuncs);
        else
            costs = Mat(test_set.length(),ncostfuncs);
    }

    if(!multipass) // stats can be computed in a single pass?
        test_statistics.init(ncostfuncs);

    if(USING_MPI && PLMPI::synchronized && !dont_parallelize && PLMPI::size>1)
    { // parallel implementation
      // cout << "PARALLEL-DATA TEST" << endl;
#if USING_MPI
        PLMPI::synchronized = false;
        if(PLMPI::rank==0) // process 0 gathers costs, computes statistics and writes stuff to output files if required
        {
            MPIStreams mpistreams(200,200);
//          MPI_Status status;
            for(int i=0; i<l; i++)
            {
                int pnum = 1 + i%(PLMPI::size-1);
                if(!save_test_outputs.empty()) // receive and save output
                {
//                  MPI_Recv(cost.data(), cost.length(), PLMPI_REAL, pnum, 0, MPI_COMM_WORLD, &status);
                    //cerr << "/ MPI #" << PLMPI::rank << " received " << cost.length() << " values from MPI #" << pnum << endl;
                    PLearn::binread(mpistreams[pnum], output);
                    outputs->putRow(i, output);
                }
/*              else // receive output and cost only
                {
                MPI_Recv(output.data(), output.length()+cost.length(), PLMPI_REAL, pnum, 0, MPI_COMM_WORLD, &status);
                //cerr << "/ MPI #" << PLMPI::rank << " received " << cost.length() << " values from MPI #" << pnum << endl;
                outputs->putRow(i,output);
                }*/
                // receive cost
                PLearn::binread(mpistreams[pnum], cost);
                if(costs) // save costs?
                    costs->putRow(i,cost);
                if(!multipass) // stats can be computed in a single pass?
                    test_statistics.update(cost);
                progbar(i);
            }
        }
        else // other processes compute output and cost on different rows of the test_set and send them to process 0
        {
            MPIStream mpistream(0,200,200); // stream to node 0
            int step = PLMPI::size-1;
            for(int i=PLMPI::rank-1; i<l; i+=step)
            {
                useAndCostOnTestVec(test_set, i, output, cost);
                // test_set->getRow(i, sample);
                // useAndCost(input,target,output,cost);
/*              if(save_test_outputs.empty()) // send only cost
                {
                //cerr << "/ MPI #" << PLMPI::rank << " sending " << cost.length() << " values to MPI #0" << endl;
                MPI_Send(cost.data(), cost.length(), PLMPI_REAL, 0, 0, MPI_COMM_WORLD);
                }
                else // send output and cost only
                {
                //cerr << "/ MPI #" << PLMPI::rank << " sending " << cost.length() << " values to MPI #0" << endl;
                MPI_Send(output.data(), output.length()+cost.length(), PLMPI_REAL, 0, 0, MPI_COMM_WORLD);
                }
                }
                }*/
                if(!save_test_outputs.empty()) // send output
                    PLearn::binwrite(mpistream, output);
                // send cost
                PLearn::binwrite(mpistream, cost);
            }
        }

        // Finalize statistics computation
        int result_len;
        if(PLMPI::rank==0) // process 0 finalizes stats computation and broadcasts them
        {
            if(!multipass)
            {
                test_statistics.finish();
                result = concat(test_statistics.getResults());
            }
            else
                result = concat(test_statistics.computeStats(costs));
            result_len = result.length();
        }
        MPI_Bcast(&result_len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        result.resize(result_len);
        MPI_Bcast(result.data(), result.length(), PLMPI_REAL, 0, MPI_COMM_WORLD);
        PLMPI::synchronized = true;
#endif
    }
    else // default sequential implementation
    {
        for (int i=0; i<l; i++)
        {
            if (i%10000<minibatch_size) stop_if_wanted();
            if (minibatch_size>1 && i+minibatch_size<l)
            {
                applyAndComputeCostsOnTestMat(test_set, i, output_block, cost_block);
                i+=minibatch_size;
                if(outputs) // save outputs?
                    outputs->putMat(i,0,output_block);
                if(costs) // save costs?
                    costs->putMat(i,0,cost_block);
                if(!multipass) // stats can be computed in a single pass?
                    test_statistics.update(cost_block);
            }
            else
            {
                useAndCostOnTestVec(test_set, i, output, cost);
                if(outputs) // save outputs?
                    outputs->putRow(i,output);
                if(costs) // save costs?
                    costs->putRow(i,cost);
                if(!multipass) // stats can be computed in a single pass?
                    test_statistics.update(cost);
            }
            // test_set->getRow(i, sample);
            // useAndCost(input, target, output, cost);

            progbar(i);
        }

        // Finalize statistics computation
        if(!multipass)
        {
            test_statistics.finish();
            result = concat(test_statistics.getResults());
        }
        else
            result = concat(test_statistics.computeStats(costs));
    }

    return result;
}
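// Note on the parallel branch above: rows are dealt out round-robin. Worker
// process p (p = 1 .. size-1) computes rows p-1, p-1+(size-1), ..., while
// process 0 receives outputs and costs in row order (pnum = 1 + i%(size-1)),
// writes them out, and accumulates the test statistics.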

void Learner::applyAndComputeCostsOnTestMat(const VMat& test_set, int i, const Mat& output_block, 
                                            const Mat& cost_block)
{
    applyAndComputeCosts(test_set.subMatRows(i,output_block.length()),output_block,cost_block);
    //applyAndComputeCosts(test_set.subMatRows(i,output_block.length()*minibatch_size),output_block,cost_block);
}

void Learner::setModel(const Vec& options) { 
    PLERROR("setModel: method not implemented for this Learner (and DEPRECATED!!! DON'T IMPLEMENT IT, DON'T CALL IT. SEE setOption INSTEAD)"); 
}

int Learner::costsize() const 
{ return test_costfuncs.size(); }

Array<string> Learner::costNames() const
{
    Array<string> cost_names(test_costfuncs.size());
    for (int i=0; i<cost_names.size(); i++)
        cost_names[i] = space_to_underscore(test_costfuncs[i]->info());
    return cost_names;
}

Array<string> Learner::testResultsNames() const
{
    Array<string> cost_names = costNames();
    Array<string> names(test_statistics.size()*cost_names.size());
    int k=0;
    for (int i=0;i<test_statistics.size();i++)
    {
        string stati = test_statistics[i]->info();
        for (int j=0;j<cost_names.size();j++)
            names[k++] = space_to_underscore(cost_names[j] + "." + stati);
    }
    return names;
}
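// The generated names have the form <costname>.<statname>; e.g. with a single
// "mse" cost function and the default mean & stderr statistics this would
// yield something like "mse.mean mse.stderr" (the exact strings depend on
// what each info() method returns).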

Array<string> Learner::trainObjectiveNames() const
{ return testResultsNames(); }

void Learner::oldwrite(ostream& out) const
{
    writeHeader(out,"Learner",1);
    writeField(out,"inputsize",inputsize_);
    writeField(out,"outputsize",outputsize_);
    writeField(out,"targetsize",targetsize_);
    writeField(out,"test_every",test_every); // recently added by senecal
    writeField(out,"earlystop_testsetnum",earlystop_testsetnum);
    writeField(out,"earlystop_testresultindex",earlystop_testresultindex);
    writeField(out,"earlystop_max_degradation",earlystop_max_degradation);
    writeField(out,"earlystop_min_value",earlystop_min_value);
    writeField(out,"earlystop_min_improvement",earlystop_min_improvement);
    writeField(out,"earlystop_relative_changes",earlystop_relative_changes);
    writeField(out,"earlystop_save_best",earlystop_save_best);
    writeField(out,"earlystop_max_degraded_steps",earlystop_max_degraded_steps);
    writeField(out,"save_at_every_epoch",save_at_every_epoch);
    writeField(out,"experiment_name",experiment_name);
    writeField(out,"test_costfuncs",test_costfuncs);
    writeField(out,"test_statistics",test_statistics);
    writeFooter(out,"Learner");
}

/* TODO Remove (deprecated)
   void Learner::oldread(istream& in)
   {
   int version=readHeader(in,"Learner");
   if(version>=2)
   {
   readField(in,"expdir",expdir);
   readField(in,"epoch",epoch_);
   }
   readField(in,"inputsize",inputsize_);
   readField(in,"outputsize",outputsize_);
   readField(in,"targetsize",targetsize_);
   readField(in,"test_every",test_every);
   readField(in,"earlystop_testsetnum",earlystop_testsetnum);
   readField(in,"earlystop_testresultindex",earlystop_testresultindex);
   readField(in,"earlystop_max_degradation",earlystop_max_degradation);
   readField(in,"earlystop_min_value",earlystop_min_value);
   readField(in,"earlystop_min_improvement",earlystop_min_improvement);
   readField(in,"earlystop_relative_changes",earlystop_relative_changes);
   readField(in,"earlystop_save_best",earlystop_save_best);
   if (version>=1)
   readField(in,"earlystop_max_degraded_steps",earlystop_max_degraded_steps);
   else
   earlystop_max_degraded_steps=-1;
   readField(in,"save_at_every_epoch",save_at_every_epoch);
   readField(in,"experiment_name",experiment_name);
   readField(in,"test_costfuncs",test_costfuncs);
   readField(in,"test_statistics",test_statistics);
   readFooter(in,"Learner");
   }
*/

void Learner::save(const PPath& filename) const
{
#if USING_MPI
    if (PLMPI::rank!=0 && !force_saving_on_all_processes)
        return;
#endif
    if(!filename.empty())
        Object::save(filename);
    else if(!experiment_name.empty())
        Object::save(experiment_name);
    else
        PLERROR("Called Learner::save with an empty filename, while experiment_name is also empty. What file name am I supposed to use???? Anyway this method is DEPRECATED, you should call directly function PLearn::save(whatever_filename_you_want, the_object) ");
}

void Learner::load(const PPath& filename)
{
    if (!filename.empty())
        Object::load(filename);
    else if (!experiment_name.empty())
        Object::load(experiment_name);
    else
        PLERROR("Called Learner::load with an empty filename, while experiment_name is also empty. What file name am I supposed to use???? Anyway this method is DEPRECATED, you should call directly function PLearn::load(whatever_filename_you_want, the_object) ");
}

void Learner::stop_if_wanted()
{
    string stopping_filename = basename()+".stop";
    if (isfile(stopping_filename))
    {
#ifdef PROFILE
        string profile_report_name = basename();
#if USING_MPI
        profile_report_name += "_r" + tostring(PLMPI::rank);
#endif
        profile_report_name += ".profile";
        ofstream profile_report(profile_report_name.c_str());
        Profiler::report(profile_report);
#endif
#if USING_MPI
        MPI_Barrier(MPI_COMM_WORLD);
        if (PLMPI::rank==0)
        {
            string fname = basename()+".stopped.psave";
            PLearn::save(fname,*this);
            vlog << "saving and quitting because of stop signal" << endl;
            unlink(stopping_filename.c_str()); // remove file if possible
        }
        exit(0);
#else
        unlink(stopping_filename.c_str()); // remove file if possible
        exit(0);
#endif
    }
}


// NOTE: For backward compatibility, default version currently calls the
// deprecated method use which should ultimately be removed...
void Learner::computeOutput(const VVec& input, Vec& output) 
{
    tmp_input.resize(input.length());
    tmp_input << input;
    use(tmp_input,output);
}


// NOTE: For backward compatibility, default version currently calls the
// deprecated method computeCost which should ultimately be removed...
void Learner::computeCostsFromOutputs(const VVec& input, const Vec& output, 
                                      const VVec& target, const VVec& weight,
                                      Vec& costs)
{
    tmp_input.resize(input.length());
    tmp_input << input;
    tmp_target.resize(target.length());
    tmp_target << target;
    computeCost(tmp_input, tmp_target, output, costs);

    int nw = weight.length();
    if(nw>0)
    {
        tmp_weight.resize(nw);
        tmp_weight << weight;
        if(nw==1)  // a single scalar weight
            costs *= tmp_weight[0];
        else if(nw==costs.length()) // one weight per cost element
            costs *= tmp_weight;
        else 
            PLERROR("In computeCostsFromOutputs: don't know how to handle cost-weight vector of length %d while costs vector has length %d", nw, costs.length());
    }
}
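// Example of the weighting above (illustrative numbers): with
// costs = (0.8, 0.2) and a single sample weight w = 0.5, the costs
// returned are (0.4, 0.1).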


void Learner::computeOutputAndCosts(const VVec& input, VVec& target, const VVec& weight,
                                    Vec& output, Vec& costs)
{
    computeOutput(input, output);
    computeCostsFromOutputs(input, output, target, weight, costs);
}

void Learner::computeCosts(const VVec& input, VVec& target, VVec& weight, 
                           Vec& costs)
{
    tmp_output.resize(outputsize());
    computeOutputAndCosts(input, target, weight, tmp_output, costs);
}


void Learner::newtrain(VecStatsCollector& stats)
{ PLERROR("newtrain not yet implemented for this learner"); }


void Learner::newtest(VMat testset, VecStatsCollector& test_stats, 
                      VMat testoutputs, VMat testcosts)
{
    PLERROR("Learner::newtest not yet implemented");

/*
  int l = testset.length();
  VVec input;
  VVec target;
  VVec weight;

  Vec output(testoutputs ?outputsize() :0);
  Vec costs(costsize());

  testset->defineSizes(inputsize(),targetsize(),weightsize());

  test_stats.forget();

  for(int i=0; i<l; i++)
  {
  testset.getSample(i, input, target, weight);

  if(testoutputs)
  {
  computeOutputAndCosts(input, target, weight, output, costs);
  testoutputs->putOrAppendRow(i,output);
  }
  else // no need to compute outputs
  computeCosts(input, target, weight, costs);

  if(testcosts)
  testcosts->putOrAppendRow(i, costs);

  test_stats.update(costs);
  }

  test_stats.finalize();

*/
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :