// -*- C++ -*-

// PLearner.cc
//
// Copyright (C) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio, Nicolas Chapados, Charles Dugas, Rejean Ducharme, Universite de Montreal
// Copyright (C) 2001,2002 Francis Pieraut, Jean-Sebastien Senecal
// Copyright (C) 2002 Frederic Morin, Xavier Saint-Mleux, Julien Keable
// Copyright (C) 2007 Xavier Saint-Mleux, ApSTAT Technologies inc.
// 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org



/* *******************************************************
 * $Id: PLearner.cc 10177 2009-05-05 20:25:48Z nouiz $
 ******************************************************* */

#include "PLearner.h"
#include <plearn/base/stringutils.h>
#include <plearn/io/fileutils.h>
#include <plearn/io/pl_log.h>
#include <plearn/math/pl_erf.h>
#include <plearn/vmat/FileVMatrix.h>
#include <plearn/vmat/MemoryVMatrix.h>
#include <plearn/vmat/RowsSubVMatrix.h>
#include <plearn/misc/PLearnService.h>
#include <plearn/misc/RemotePLearnServer.h>
#include <plearn/vmat/PLearnerOutputVMatrix.h>
#include <plearn/base/RemoteDeclareMethod.h>

namespace PLearn {
using namespace std;

PLearner::PLearner()
    : n_train_costs_(-1),
      n_test_costs_(-1),
      seed_(1827),
      stage(0),
      nstages(1),
      report_progress(true),
      verbosity(1),
      nservers(0),
      test_minibatch_size(1),
      save_trainingset_prefix(""),
      parallelize_here(true),
      master_sends_testset_rows(false),
      use_a_separate_random_generator_for_testing(1827),
      finalized(false),
      inputsize_(-1),
      targetsize_(-1),
      weightsize_(-1),
      n_examples(-1),
      forget_when_training_set_changes(false)
{}

PLEARN_IMPLEMENT_ABSTRACT_OBJECT(
    PLearner,
    "The base class for all PLearn learning algorithms",
    "PLearner provides a base class for all learning algorithms within PLearn.\n"
    "It presents an abstraction of learning that centers around a \"train-test\"\n"
    "paradigm:\n"
    "\n"
    "- Phase 1: TRAINING.  In this phase, one must first establish an experiment\n"
    "  directory (usually done by an enclosing PTester) to store any temporary\n"
    "  files that the learner might seek to create.  Then, one sets a training\n"
    "  set VMat (also done by the enclosing PTester), which contains the set of\n"
    "  input-target pairs that the learner should attempt to represent.  Finally\n"
    "  one calls the train() virtual member function to carry out the actual\n"
    "  action of training the model.\n"
    "\n"
    "- Phase 2: TESTING.  In this phase (to be done after training), one\n"
    "  repeatedly calls functions from the computeOutput() family to evaluate\n"
    "  the trained model on new input vectors.\n"
    "\n"
    "Note that the PTester class is the usual \"driver\" for a PLearner (and\n"
    "automatically calls the above functions in the appropriate order), in the\n"
    "usual scenario wherein one wants to evaluate the generalization performance\n"
    "on a dataset.\n"
    );
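
// Usage sketch (illustrative only, not part of the library): the
// train-then-test paradigm described above, driven manually rather than
// through a PTester.  Assumes 'learner' is a PP<PLearner> pointing to a
// concrete subclass, and 'trainset'/'testset' are VMats whose inputsize,
// targetsize and weightsize are set properly.
//
//     learner->setExperimentDirectory("exp");            // Phase 0: expdir
//     learner->setTrainingSet(trainset);                 // calls build()/forget()
//     learner->setTrainStatsCollector(new VecStatsCollector());
//     learner->train();                                  // Phase 1: TRAINING
//
//     Vec input, target, output(learner->outputsize());  // Phase 2: TESTING
//     real weight;
//     testset.getExample(0, input, target, weight);
//     learner->computeOutput(input, output);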

///////////////////////////////////
// makeDeepCopyFromShallowCopy //
///////////////////////////////////
void PLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(tmp_output,       copies);
    deepCopyField(train_set,        copies);
    deepCopyField(validation_set,   copies);
    deepCopyField(train_stats,      copies);
    deepCopyField(random_gen,       copies);
    deepCopyField(b_inputs,         copies);
    deepCopyField(b_targets,        copies);
    deepCopyField(b_outputs,        copies);
    deepCopyField(b_costs,          copies);
    deepCopyField(b_weights,        copies);
}

////////////////////
// declareOptions //
////////////////////
void PLearner::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "expdir", &PLearner::expdir, OptionBase::buildoption | OptionBase::nosave | OptionBase::remotetransmit,
        "Path of the directory associated with this learner, in which\n"
        "it should save any file it wishes to create.\n"
        "The directory will be created if it does not already exist.\n"
        "If expdir is the empty string (the default), then the learner\n"
        "should not create *any* file. Note that most file creation and\n"
        "reporting are handled at the level of the PTester class rather than\n"
        "at the learner's.\n");

    declareOption(
        ol, "random_gen", &PLearner::random_gen, OptionBase::learntoption,
        "The random number generator used in this learner. Constructed from the seed.\n");

    declareOption(
        ol, "seed", &PLearner::seed_, OptionBase::buildoption,
        "The initial seed for the random number generator used in this\n"
        "learner, for instance for parameter initialization.\n"
        "If -1 is provided, then a 'random' seed is chosen based on time\n"
        "of day, ensuring that different experiments run differently.\n"
        "If 0 is provided, no (re)initialization of the random number\n"
        "generator is performed.\n"
        "With a given positive seed, build() and forget() should always\n"
        "initialize the parameters to the same values.");

    declareOption(
        ol, "stage", &PLearner::stage, OptionBase::learntoption,
        "The current training stage, since last fresh initialization (forget()):\n"
        "0 means untrained, n often means after n epochs or optimization steps, etc...\n"
        "The true meaning is learner-dependent.\n"
        "You should never modify this option directly!\n"
        "It is the role of forget() to bring it back to 0,\n"
        "and the role of train() to bring it up to 'nstages'...");

    declareOption(
        ol, "n_examples", &PLearner::n_examples, OptionBase::learntoption,
        "The number of samples in the training set.\n"
        "Obtained from the training set with setTrainingSet.");

    declareOption(
        ol, "inputsize", &PLearner::inputsize_, OptionBase::learntoption,
        "The number of input columns in the data sets.\n"
        "Obtained from the training set with setTrainingSet.");

    declareOption(
        ol, "targetsize", &PLearner::targetsize_, OptionBase::learntoption,
        "The number of target columns in the data sets.\n"
        "Obtained from the training set with setTrainingSet.");

    declareOption(
        ol, "weightsize", &PLearner::weightsize_, OptionBase::learntoption,
        "The number of cost weight columns in the data sets.\n"
        "Obtained from the training set with setTrainingSet.");

    declareOption(
        ol, "forget_when_training_set_changes",
        &PLearner::forget_when_training_set_changes, OptionBase::buildoption,
        "Whether or not to call the forget() method (re-initialize the model\n"
        "as before training) in setTrainingSet when the\n"
        "training set changes (e.g. in dimension).");

    declareOption(
        ol, "nstages", &PLearner::nstages, OptionBase::buildoption,
        "The stage until which train() should train this learner and return.\n"
        "The meaning of 'stage' is learner-dependent, but for learners whose\n"
        "training is incremental (such as involving incremental optimization),\n"
        "it is typically synonymous with the number of 'epochs', i.e. the number\n"
        "of passes of the optimization process through the whole training set,\n"
        "since the last fresh initialization.");

    declareOption(
        ol, "report_progress", &PLearner::report_progress, OptionBase::buildoption,
        "Whether progress in learning and testing should be reported in a ProgressBar.\n");

    declareOption(
        ol, "verbosity", &PLearner::verbosity, OptionBase::buildoption,
        "Level of verbosity. If 0, the learner should not write anything on perr.\n"
        "If >0, it may write some info on the steps performed along the way.\n"
        "The level of detail written should depend on this value.");

    declareOption(
        ol, "nservers", &PLearner::nservers, OptionBase::buildoption,
        "DEPRECATED: use parallelize_here instead.\n"
        "Max number of computation servers to use in parallel with the main process.\n"
        "If <=0 no parallelization will occur at this level.\n",
        OptionBase::deprecated_level);

    declareOption(
        ol, "save_trainingset_prefix", &PLearner::save_trainingset_prefix,
        OptionBase::buildoption,
        "Whether the training set should be saved upon a call to\n"
        "setTrainingSet().  The saved file is put in the learner's expdir\n"
        "(assuming there is one) and has the form \"<prefix>_trainset_XXX.pmat\"\n"
        "The prefix is what this option specifies.  'XXX' is a unique\n"
        "serial number that is globally incremented with each saved\n"
        "setTrainingSet.  This option is useful when manipulating very\n"
        "complex nested learner structures, and you want to ensure that\n"
        "the inner learner is getting the correct results.  (Default=\"\",\n"
        "i.e. don't save anything.)\n");

    declareOption(
        ol, "parallelize_here", &PLearner::parallelize_here,
        OptionBase::buildoption | OptionBase::nosave,
        "Reserve remote servers at this level if true.\n");

    declareOption(
        ol, "master_sends_testset_rows", &PLearner::master_sends_testset_rows,
        OptionBase::buildoption | OptionBase::nosave,
        "For parallel PLearner::test : whether the master should read the testset and\n"
        "send rows to the slaves, or send a serialized description of the testset.\n");

    declareOption(
        ol, "test_minibatch_size", &PLearner::test_minibatch_size,
        OptionBase::buildoption,
        "Size of minibatches used during testing to take advantage\n"
        "of efficient (possibly parallelized) implementations when\n"
        "multiple examples are processed at once.\n");

    declareOption(
        ol, "use_a_separate_random_generator_for_testing",
        &PLearner::use_a_separate_random_generator_for_testing,
        OptionBase::buildoption,
        "This option allows testing to always be performed under the same\n"
        "conditions in terms of the random generator (if testing involves\n"
        "some non-deterministic component, this can be useful in order\n"
        "to obtain repeatable test results).\n"
        "If non-zero, the base class test() method will use a different\n"
        "random generator than the rest of the code (i.e. training).\n"
        "The non-zero value is the seed to be used during testing.\n"
        "A value of -1 sets the seed differently each time depending on the clock\n"
        "(which is probably not desired here).\n"
        "Note that this option might not be taken into account in some\n"
        "sub-classes that override the PLearner's test method.");

    declareOption(
        ol, "finalized", &PLearner::finalized,
        OptionBase::learntoption,
        "(Default: false.) Set to true when finalize() is called after\n"
        "training.  When true, it means the learner will not be trained\n"
        "again, which allows some optimizations.\n");

    inherited::declareOptions(ol);
}
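
// Sketch (illustrative, hypothetical subclass): a concrete PLearner would
// extend this pattern for its own options, e.g.
//
//     void MyLearner::declareOptions(OptionList& ol)
//     {
//         declareOption(ol, "learning_rate", &MyLearner::learning_rate,
//                       OptionBase::buildoption,
//                       "Gradient step size used during training.\n");
//         inherited::declareOptions(ol);
//     }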

////////////////////
// declareMethods //
////////////////////
void PLearner::declareMethods(RemoteMethodMap& rmm)
{
    // Insert a backpointer to remote methods; note that this is different from
    // declareOptions().
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(
        rmm, "setTrainingSet", &PLearner::setTrainingSet,
        (BodyDoc("Declares the training set.  Then calls build() and forget() if\n"
                 "necessary.\n"),
         ArgDoc ("training_set", "The training set VMatrix to set; should have\n"
                 "its inputsize, targetsize and weightsize fields set properly.\n"),
         ArgDoc ("call_forget", "Whether the forget() function should be called\n"
                 "upon setting the training set\n")));

    declareMethod(
        rmm, "getTrainingSet", &PLearner::getTrainingSet,
        (BodyDoc("Returns the current training set."),
         RetDoc ("The trainset")));

    declareMethod(
        rmm, "setExperimentDirectory", &PLearner::setExperimentDirectory,
        (BodyDoc("The experiment directory is the directory in which files related to\n"
                 "this model are to be saved.  If it is an empty string, it is understood\n"
                 "to mean that the user doesn't want any file created by this learner.\n"),
         ArgDoc ("expdir", "Experiment directory to set")));

    declareMethod(
        rmm, "getExperimentDirectory", &PLearner::getExperimentDirectory,
        (BodyDoc("This returns the currently set experiment directory\n"
                 "(see setExperimentDirectory)\n"),
         RetDoc ("Current experiment directory")));

    declareMethod(
        rmm, "outputsize", &PLearner::outputsize,
        (BodyDoc("Return the learner's outputsize")));

    declareMethod(
        rmm, "setTrainStatsCollector", &PLearner::setTrainStatsCollector,
        (BodyDoc("Sets the statistics collector whose update() method will be called\n"
                 "during training.\n"),
         ArgDoc ("statscol", "The statistics collector to set")));

    declareMethod(
        rmm, "getTrainStatsCollector", &PLearner::getTrainStatsCollector,
        (BodyDoc("Returns the statistics collector that was used during training.\n"),
         RetDoc ("Current training statistics collector")));

    declareMethod(
        rmm, "forget", &PLearner::forget,
        (BodyDoc("(Re-)initializes the PLearner in its fresh state (that state may depend\n"
                 "on the 'seed' option) and sets 'stage' back to 0 (this is the stage of\n"
                 "a fresh learner!)\n"
                 "\n"
                 "A typical forget() method should do the following:\n"
                 "\n"
                 "- call inherited::forget() to initialize the random number generator\n"
                 "  with the 'seed' option\n"
                 "\n"
                 "- initialize the learner's parameters, using this random generator\n"
                 "\n"
                 "- stage = 0;\n"
                 "\n"
                 "This method is typically called by the build_() method, after it has\n"
                 "finished setting up the parameters, and if it deemed useful to set or\n"
                 "reset the learner in its fresh state.  (remember build may be called\n"
                 "after modifying options that do not necessarily require the learner to\n"
                 "restart from a fresh state...)  forget is also called by the\n"
                 "setTrainingSet method, after calling build(), so it will generally be\n"
                 "called TWICE during setTrainingSet!\n")));

    declareMethod(
        rmm, "train", &PLearner::train,
        (BodyDoc("The role of the train method is to bring the learner up to\n"
                 "stage==nstages, updating the stats with training costs measured on-line\n"
                 "in the process.\n")));


    declareMethod(
        rmm, "sub_test", &PLearner::sub_test,
        (BodyDoc("Test on a given (chunk of a) testset and return stats, outputs and costs.  "
                 "Used by parallel test"),
         ArgDoc("testset","test set"),
         ArgDoc("test_stats","VecStatsCollector to use"),
         ArgDoc("rtestoutputs","whether to return outputs"),
         ArgDoc("rtestcosts","whether to return costs"),
         RetDoc ("tuple of (stats, outputs, costs)")));

    declareMethod(
        rmm, "test", &PLearner::remote_test,
        (BodyDoc("Test on a given testset and return stats, outputs and costs."),
         ArgDoc("testset","test set"),
         ArgDoc("test_stats","VecStatsCollector to use"),
         ArgDoc("rtestoutputs","whether to return outputs"),
         ArgDoc("rtestcosts","whether to return costs"),
         RetDoc ("tuple of (stats, outputs, costs)")));


    declareMethod(
        rmm, "resetInternalState", &PLearner::resetInternalState,
        (BodyDoc("If the learner is a stateful one (inherits from StatefulLearner),\n"
                 "this resets the internal state to its initial value; by default,\n"
                 "this function does nothing.")));

    declareMethod(
        rmm, "computeOutput", &PLearner::remote_computeOutput,
        (BodyDoc("On a trained learner, this computes the output from the input"),
         ArgDoc ("input", "Input vector (should have width inputsize)"),
         RetDoc ("Computed output (will have width outputsize)")));

    declareMethod(
        rmm, "computeOutputs", &PLearner::remote_computeOutputs,
        (BodyDoc("On a trained learner, this computes the output from the input, one\n"
                 "batch of examples at a time (one example per row of the argument\n"
                 "matrices).\n"),
         ArgDoc ("inputs", "Input matrix (batch_size x inputsize)"),
         RetDoc ("Resulting output matrix (batch_size x outputsize)")));

    declareMethod(
        rmm, "use", &PLearner::remote_use,
        (BodyDoc("Compute the output of a trained learner on every row of an\n"
                 "input VMatrix.  The outputs are stored in a .pmat matrix\n"
                 "under the specified filename."),
         ArgDoc ("input_vmat", "VMatrix containing the inputs"),
         ArgDoc ("output_pmat_fname", "Name of the .pmat to store the computed outputs")));

    declareMethod(
        rmm, "use2", &PLearner::remote_use2,
        (BodyDoc("Compute the output of a trained learner on every row of an\n"
                 "input VMatrix.  The outputs are returned as a matrix.\n"),
         ArgDoc ("input_vmat", "VMatrix containing the inputs"),
         RetDoc ("Matrix holding the computed outputs")));

    declareMethod(
        rmm, "useOnTrain", &PLearner::remote_useOnTrain,
        (BodyDoc("Compute the output of a trained learner on every row of\n"
                 "the trainset.  The outputs are returned as a matrix.\n"),
         RetDoc ("Matrix holding the computed outputs")));

    declareMethod(
        rmm, "computeInputOutputMat", &PLearner::computeInputOutputMat,
        (BodyDoc("Returns a matrix which is a (horizontal) concatenation of the inputs\n"
                 "and the computed outputs.\n"),
         ArgDoc ("inputs", "VMatrix containing the inputs"),
         RetDoc ("Matrix holding the inputs+computed_outputs")));

    declareMethod(
        rmm, "computeInputOutputConfMat", &PLearner::computeInputOutputConfMat,
        (BodyDoc("Return a Mat that is the concatenation of inputs, outputs, lower\n"
                 "confidence bound, and upper confidence bound.  If confidence intervals\n"
                 "cannot be computed for the learner, they are filled with MISSING_VALUE.\n"),
         ArgDoc ("inputs", "VMatrix containing the inputs"),
         ArgDoc ("probability", "Level at which the confidence intervals should be computed, "
                                "e.g. 0.95."),
         RetDoc ("Matrix holding the inputs+outputs+confidence-low+confidence-high")));

    declareMethod(
        rmm, "computeOutputConfMat", &PLearner::computeOutputConfMat,
        (BodyDoc("Return a Mat that is the concatenation of outputs, lower confidence\n"
                 "bound, and upper confidence bound.  If confidence intervals cannot be\n"
                 "computed for the learner, they are filled with MISSING_VALUE.\n"),
         ArgDoc ("inputs", "VMatrix containing the inputs"),
         ArgDoc ("probability", "Level at which the confidence intervals should be computed, "
                                "e.g. 0.95."),
         RetDoc ("Matrix holding the outputs+confidence-low+confidence-high")));

    declareMethod(
        rmm, "computeOutputAndCosts", &PLearner::remote_computeOutputAndCosts,
        (BodyDoc("Compute both the output from the input, and the costs associated\n"
                 "with the desired target.  The computed costs\n"
                 "are returned in the order given by getTestCostNames()\n"),
         ArgDoc ("input",  "Input vector (should have width inputsize)"),
         ArgDoc ("target", "Target vector (for cost computation)"),
         RetDoc ("- Vec containing output\n"
                 "- Vec containing cost")));

    declareMethod(
        rmm, "computeOutputsAndCosts", &PLearner::remote_computeOutputsAndCosts,
        (BodyDoc("Compute both the output from the input, and the costs associated\n"
                 "with the desired target.  The computed costs\n"
                 "are returned in the order given by getTestCostNames()\n"
                 "This variant computes the outputs and the costs simultaneously\n"
                 "for a whole batch of examples (rows of the argument matrices)\n"),
         ArgDoc ("inputs", "Input matrix (batch_size x inputsize)"),
         ArgDoc ("targets", "Target matrix (batch_size x targetsize)"),
         RetDoc ("Pair containing first the resulting output matrix\n"
                 "(batch_size x outputsize), then the costs matrix\n"
                 "(batch_size x costsize)")));

    declareMethod(
        rmm, "computeCostsFromOutputs", &PLearner::remote_computeCostsFromOutputs,
        (BodyDoc("Compute the costs from already-computed output.  The computed costs\n"
                 "are returned in the order given by getTestCostNames()"),
         ArgDoc ("input",  "Input vector (should have width inputsize)"),
         ArgDoc ("output", "Output vector computed by previous call to computeOutput()"),
         ArgDoc ("target", "Target vector"),
         RetDoc ("The computed costs vector")));

    declareMethod(
        rmm, "computeCostsOnly", &PLearner::remote_computeCostsOnly,
        (BodyDoc("Compute the costs only, without the outputs; for some learners, this\n"
                 "may be more efficient than calling computeOutputAndCosts() if the\n"
                 "outputs are not needed.  (The default implementation simply calls\n"
                 "computeOutputAndCosts() and discards the output.)\n"),
         ArgDoc ("input",  "Input vector (should have width inputsize)"),
         ArgDoc ("target", "Target vector"),
         RetDoc ("The computed costs vector")));

    declareMethod(
        rmm, "computeConfidenceFromOutput", &PLearner::remote_computeConfidenceFromOutput,
        (BodyDoc("Compute confidence intervals for the output, given the input and the\n"
                 "pre-computed output (resulting from computeOutput or similar).  The\n"
                 "probability level of the confidence interval must be specified\n"
                 "(e.g. 0.95).  Result is stored in a TVec of pairs low:high for each\n"
                 "output variable (this is a \"box\" interval; it does not account for\n"
                 "correlations among the output variables).\n"),
         ArgDoc ("input",       "Input vector (should have width inputsize)"),
         ArgDoc ("output",      "Output vector computed by previous call to computeOutput()"),
         ArgDoc ("probability", "Level at which the confidence interval must be computed,\n"
                                "e.g. 0.95\n"),
         RetDoc ("Vector of pairs low:high giving, respectively, the lower-bound confidence\n"
                 "and upper-bound confidence for each dimension of the output vector.  If this\n"
                 "vector is empty, then confidence intervals could not be computed for the\n"
                 "given learner.  Note that this is the PLearner default (not to compute\n"
                 "any confidence intervals), but some learners such as LinearRegressor\n"
                 "know how to compute them.")));

    declareMethod(
        rmm, "computeOutputCovMat", &PLearner::remote_computeOutputCovMat,
        (BodyDoc("Version of computeOutput that is capable of returning an output matrix\n"
                 "given an input matrix (set of output vectors), as well as the complete\n"
                 "covariance matrix between the outputs.\n"
                 "\n"
                 "A separate covariance matrix is returned for each output dimension, but\n"
                 "these matrices are allowed to share the same storage.  This would be\n"
                 "the case in situations where the output covariance really depends only\n"
                 "on the location of the training inputs, as in, e.g.,\n"
                 "GaussianProcessRegressor.\n"
                 "\n"
                 "The default implementation is to repeatedly call computeOutput,\n"
                 "followed by computeConfidenceFromOutput (sampled with probability\n"
                 "Erf[1/(2*Sqrt(2))], to extract 1*stddev given by subtraction of the two\n"
                 "intervals, then squaring the stddev to obtain the variance), thereby\n"
                 "filling a diagonal output covariance matrix.  If\n"
                 "computeConfidenceFromOutput returns 'false' (confidence intervals not\n"
                 "supported), the returned covariance matrix is filled with\n"
                 "MISSING_VALUE.\n"),
         ArgDoc ("inputs", "Matrix containing the set of test points"),
         RetDoc ("Two quantities are returned:\n"
                 "- The matrix containing the expected output (as rows) for each input row.\n"
                 "- A vector of covariance matrices between the outputs (one covariance\n"
                 "  matrix per output dimension).\n")));

    declareMethod(
        rmm, "batchComputeOutputAndConfidencePMat",
        &PLearner::remote_batchComputeOutputAndConfidence,
        (BodyDoc("Repeatedly calls computeOutput and computeConfidenceFromOutput with the\n"
                 "rows of inputs.  Writes outputs_and_confidence rows (as a series of\n"
                 "triples (output, low, high), one for each output).  The results are\n"
                 "stored in a .pmat whose filename is passed as argument.\n"),
         ArgDoc ("input_vmat",  "VMatrix containing the input rows"),
         ArgDoc ("probability", "Level at which the confidence interval must be computed,\n"
                                "e.g. 0.95\n"),
         ArgDoc ("result_pmat_filename", "Filename where to store the results")));

    declareMethod(
        rmm, "getTestCostNames", &PLearner::getTestCostNames,
        (BodyDoc("Return the name of the costs computed by computeCostsFromOutputs()\n"
                 "and computeOutputAndCosts()"),
         RetDoc ("List of test cost names")));

    declareMethod(
        rmm, "getTrainCostNames", &PLearner::getTrainCostNames,
        (BodyDoc("Return the names of the objective costs that the train\n"
                 "method computes and for which it updates the VecStatsCollector\n"
                 "train_stats."),
         RetDoc ("List of train cost names")));
}
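
// Sketch (illustrative, hypothetical subclass): a subclass exposing one of
// its own methods to the remote-call mechanism would follow the same
// pattern, e.g.
//
//     void MyLearner::declareMethods(RemoteMethodMap& rmm)
//     {
//         rmm.inherited(inherited::_getRemoteMethodMap_());
//         declareMethod(
//             rmm, "myMethod", &MyLearner::myMethod,
//             (BodyDoc("What the method does."),
//              ArgDoc ("arg", "What the argument means."),
//              RetDoc ("What is returned.")));
//     }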

////////////////////////////
// setExperimentDirectory //
////////////////////////////
void PLearner::setExperimentDirectory(const PPath& the_expdir)
{
    if(the_expdir=="")
        expdir = "";
    else
    {
        if(!force_mkdir(the_expdir))
            PLERROR("In PLearner::setExperimentDirectory Could not create experiment directory %s",
                    the_expdir.absolute().c_str());
        expdir = the_expdir / "";
    }
}

void PLearner::setTrainingSet(VMat training_set, bool call_forget)
{
    // YB: I am not sure a build is necessary when the LENGTH of the train_set
    // has changed; non-parametric methods that use the length should do their
    // "resize" in train, not in build.
    bool training_set_has_changed = !train_set || !(train_set->looksTheSameAs(training_set));
    train_set = training_set;
    if (training_set_has_changed)
    {
        inputsize_ = train_set->inputsize();
        targetsize_ = train_set->targetsize();
        weightsize_ = train_set->weightsize();
        if (forget_when_training_set_changes)
            call_forget=true;
    }
    n_examples = train_set->length();
    if (training_set_has_changed || call_forget)
        build(); // CHANGE BY YOSHUA: otherwise after a setTrainingSet the build is not completed in an NNet
    if (call_forget)
        forget();

    // Save the new training set if desired
    if (save_trainingset_prefix != "" && expdir != "") {
        static int trainingset_serial = 1;
        PPath fname = expdir / (save_trainingset_prefix + "_trainset_" +
                                tostring(trainingset_serial++) + ".pmat");
        train_set->savePMAT(fname);
    }
}

void PLearner::setValidationSet(VMat validset)
{ validation_set = validset; }


void PLearner::setTrainStatsCollector(PP<VecStatsCollector> statscol)
{
    train_stats = statscol;
    train_stats->setFieldNames(getTrainCostNames());
}


int PLearner::inputsize() const
{
    if (inputsize_<0)
        PLERROR("Must specify a training set before calling PLearner::inputsize()"
                " (or use a training set with a valid inputsize)");
    return inputsize_;
}

int PLearner::targetsize() const
{
    if(targetsize_ == -1)
        PLERROR("In PLearner::targetsize (%s) - 'targetsize_' is -1,"
                " either no training set has been specified or its sizes"
                " were not set properly", this->classname().c_str());
    return targetsize_;
}

int PLearner::weightsize() const
{
    if(weightsize_ == -1)
        PLERROR("In PLearner::weightsize - 'weightsize_' is -1, either no training set has been specified or its sizes were not set properly");
    return weightsize_;
}
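
// Note (illustrative sketch): inputsize_/targetsize_/weightsize_ come from
// the training set, so the VMat must have its sizes defined before
// setTrainingSet() is called; e.g., assuming 'data' is an existing VMat and
// defineSizes has its usual VMatrix semantics:
//
//     data->defineSizes(n_inputs, n_targets, n_weights);
//     learner->setTrainingSet(data);
//     int is = learner->inputsize();   // now valid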

////////////
// build_ //
////////////
void PLearner::build_()
{
    if(expdir!="")
    {
        if(!force_mkdir(expdir))
            PLWARNING("In PLearner::build_ - Could not create experiment directory %s", expdir.c_str());
        else
            expdir = expdir.absolute() / "";
    }
    if (random_gen && seed_ != 0)
        random_gen->manual_seed(seed_);
}

///////////
// build //
///////////
void PLearner::build()
{
    inherited::build();
    build_();
}

////////////
// forget //
////////////
void PLearner::forget()
{
    if (random_gen && seed_ != 0)
        random_gen->manual_seed(seed_);
    stage = 0;
    finalized=false;
}

//////////////
// finalize //
//////////////
void PLearner::finalize()
{
    finalized=true;
}

////////////////
// nTestCosts //
////////////////
int PLearner::nTestCosts() const
{
    if(n_test_costs_<0)
        n_test_costs_ = getTestCostNames().size();
    return n_test_costs_;
}

/////////////////
// nTrainCosts //
/////////////////
int PLearner::nTrainCosts() const
{
    if(n_train_costs_<0)
        n_train_costs_ = getTrainCostNames().size();
    return n_train_costs_;
}

int PLearner::getTestCostIndex(const string& costname) const
{
    TVec<string> costnames = getTestCostNames();
    for(int i=0; i<costnames.length(); i++)
        if(costnames[i]==costname)
            return i;
    PLERROR("In PLearner::getTestCostIndex, No test cost named %s in this learner.\n"
            "Available test costs are: %s", costname.c_str(),
            tostring(costnames).c_str());
    return -1;
}

int PLearner::getTrainCostIndex(const string& costname) const
{
    TVec<string> costnames = getTrainCostNames();
    for(int i=0; i<costnames.length(); i++)
        if(costnames[i]==costname)
            return i;
    PLERROR("In PLearner::getTrainCostIndex, No train cost named %s in this learner.\n"
            "Available train costs are: %s", costname.c_str(), tostring(costnames).c_str());
    return -1;
}

void PLearner::computeOutputAndCosts(const Vec& input, const Vec& target,
                                     Vec& output, Vec& costs) const
{
    computeOutput(input, output);
    computeCostsFromOutputs(input, output, target, costs);
}

void PLearner::computeCostsOnly(const Vec& input, const Vec& target,
                                Vec& costs) const
{
    tmp_output.resize(outputsize());
    computeOutputAndCosts(input, target, tmp_output, costs);
}

bool PLearner::computeConfidenceFromOutput(
    const Vec& input, const Vec& output,
    real probability,
    TVec< pair<real,real> >& intervals) const
{
    // Default version does not know how to compute confidence intervals
    intervals.resize(output.size());
    intervals.fill(std::make_pair(MISSING_VALUE,MISSING_VALUE));
    return false;
}
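
// Sketch (illustrative, hypothetical subclass): a learner that models each
// output as Gaussian with a known per-output stddev could override the
// default roughly like this, assuming a member Vec 'output_stddev' and a
// standard-normal quantile helper such as gauss_01_quantile (declared in
// plearn/math/pl_erf.h; treated here as an assumption):
//
//     bool MyLearner::computeConfidenceFromOutput(
//         const Vec& input, const Vec& output, real probability,
//         TVec< pair<real,real> >& intervals) const
//     {
//         real z = gauss_01_quantile((1 + probability) / 2);
//         intervals.resize(output.size());
//         for (int j = 0; j < output.size(); j++)
//             intervals[j] = make_pair(output[j] - z * output_stddev[j],
//                                      output[j] + z * output_stddev[j]);
//         return true;   // confidence intervals are supported
//     }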

void PLearner::computeOutputCovMat(const Mat& inputs, Mat& outputs,
                                   TVec<Mat>& covariance_matrices) const
{
    PLASSERT( inputs.width() == inputsize() && outputsize() > 0 );
    const int N = inputs.length();
    const int M = outputsize();
    outputs.resize(N, M);
    covariance_matrices.resize(M);

    bool has_confidence  = true;
    bool init_covariance = false;
    Vec cur_input, cur_output;
    TVec< pair<real,real> > intervals;
    for (int i=0 ; i<N ; ++i) {
        cur_input  = inputs(i);
        cur_output = outputs(i);
        computeOutput(cur_input, cur_output);
        if (has_confidence) {
            static const real probability = pl_erf(1. / (2*sqrt(2.0)));
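            // Note: for a Gaussian output, a centered interval with coverage
            // erf(1/(2*sqrt(2))) spans +/- 0.5 stddev, so the interval width
            // (high - low) computed below is exactly one stddev.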
            has_confidence = computeConfidenceFromOutput(cur_input, cur_output,
                                                         probability, intervals);
            if (has_confidence) {
                // Create the covariance matrices only once; filled with zeros
                if (! init_covariance) {
                    for (int j=0 ; j<M ; ++j)
                        covariance_matrices[j] = Mat(N, N, 0.0);
                    init_covariance = true;
                }

                // Compute the variance for each output j, and set it on
                // element i,i of the j-th covariance matrix
                for (int j=0 ; j<M ; ++j) {
                    float stddev = intervals[j].second - intervals[j].first;
                    float var = stddev*stddev;
                    covariance_matrices[j](i,i) = var;
                }
            }
        }
    }

    // If confidence intervals are not supported, fill the covariance matrices
    // with missing values
    if (! has_confidence)
        for (int j=0 ; j<M ; ++j)
            covariance_matrices[j] = Mat(N, N, MISSING_VALUE);
}

void PLearner::batchComputeOutputAndConfidence(VMat inputs, real probability, VMat outputs_and_confidence) const
{
    Vec input(inputsize());
    Vec output(outputsize());
    int outsize = outputsize();
    Vec output_and_confidence(3*outsize);
    TVec< pair<real,real> > intervals;
    int l = inputs.length();
    for(int i=0; i<l; i++)
    {
        inputs->getRow(i,input);
        computeOutput(input,output);
        computeConfidenceFromOutput(input,output,probability,intervals);
        for(int j=0; j<outsize; j++)
        {
            output_and_confidence[3*j] = output[j];
            output_and_confidence[3*j+1] = intervals[j].first;
            output_and_confidence[3*j+2] = intervals[j].second;
        }
        outputs_and_confidence->putOrAppendRow(i,output_and_confidence);
    }
}

/////////
// use //
/////////
void PLearner::use(VMat testset, VMat outputs) const
{
    int l = testset.length();
    int w = testset.width();

    TVec< PP<RemotePLearnServer> > servers;
    if(nservers>0)
        servers = PLearnService::instance().reserveServers(nservers);

    if(servers.length()==0)
    { // sequential code
        Vec input;
        Vec target;
        real weight;
        Vec output(outputsize());

        PP<ProgressBar> pb;
        if(report_progress)
            pb = new ProgressBar("Using learner",l);

        if (test_minibatch_size==1)
        {
            for(int i=0; i<l; i++)
            {
                testset.getExample(i, input, target, weight);
                computeOutput(input, output);
                outputs->putOrAppendRow(i,output);
                if(pb)
                    pb->update(i);
            }
        } else
        {
            int out_size = outputsize() >= 0 ? outputsize() : 0;
            int n_batches = l/test_minibatch_size, i=0;
            b_inputs.resize(test_minibatch_size,inputsize());
            b_outputs.resize(test_minibatch_size, out_size);
            b_costs.resize(test_minibatch_size,nTestCosts());
            b_targets.resize(test_minibatch_size,targetsize());
            b_weights.resize(test_minibatch_size);
            for (int b=0;b<n_batches;b++,i+=test_minibatch_size)
            {
                testset->getExamples(i,test_minibatch_size,b_inputs,b_targets,b_weights);
                computeOutputs(b_inputs,b_outputs);
                for (int j=0;j<test_minibatch_size;j++)
                {
                    outputs->putOrAppendRow(i+j, b_outputs(j));
                }
                if (pb) pb->update(i+test_minibatch_size);
            }
            if (i<l)
            {
                b_inputs.resize(l-i,inputsize());
                b_outputs.resize(l-i, out_size);
                b_costs.resize(l-i,nTestCosts());
                b_targets.resize(l-i,targetsize());
                b_weights.resize(l-i);
                testset->getExamples(i,l-i,b_inputs,b_targets,b_weights);
                computeOutputs(b_inputs,b_outputs);
                for (int j=0;j<l-i;j++)
                {
                    outputs->putOrAppendRow(i+j, b_outputs(j));
                }
                if (pb) pb->update(l);
            }
        }


    }
    else // parallel code
    {
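        // Overview of the parallel dispatch below (derived from the code):
        // the learner is sent to every reserved server under object id 0;
        // the testset is split into chunks of at most 'chunksize' rows
        // (capped so that a chunk never exceeds ~1M matrix elements); each
        // chunk is dispatched round-robin to a server via the "use2" remote
        // method, and the resulting output rows are collected back in order.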
        int n = servers.length(); // number of allocated servers
        DBG_LOG << "PLearner::use parallel code using " << n << " servers" << endl;
        for(int k=0; k<n; k++)  // send this object with objid 0
            servers[k]->newObject(0, *this);
        int chunksize = l/n;
        if(chunksize*n<l)
            ++chunksize;
        if(chunksize*w>1000000) // max 1 Mega elements
            chunksize = max(1,1000000/w);
        Mat chunk(chunksize,w);
        int send_i=0;
        Mat outmat;
        int receive_i = 0;
        while(send_i<l)
        {
            for(int k=0; k<n && send_i<l; k++)
            {
                int actualchunksize = chunksize;
                if(send_i+actualchunksize>l)
                    actualchunksize = l-send_i;
                chunk.resize(actualchunksize,w);
                testset->getMat(send_i, 0, chunk);
                VMat inputs(chunk);
                inputs->copySizesFrom(testset);
                DBG_LOG << "PLearner::use calling use2 remote method with chunk starting at "
                        << send_i << " of length " << actualchunksize << ":" << inputs << endl;
                servers[k]->callMethod(0,"use2",inputs);
                send_i += actualchunksize;
            }
            for(int k=0; k<n && receive_i<l; k++)
            {
                outmat.resize(0,0);
                servers[k]->getResults(outmat);
                for(int ii=0; ii<outmat.length(); ii++)
                    outputs->putOrAppendRow(receive_i++,outmat(ii));
            }
        }
        if(send_i!=l || receive_i!=l)
            PLERROR("In PLearner::use - parallel execution failed to complete successfully.");
    }
}

VMat PLearner::processDataSet(VMat dataset) const
{
    // PLearnerOutputVMatrix does exactly this.
    return new PLearnerOutputVMatrix(dataset, this);
}
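
// Usage note (illustrative sketch): the returned VMat computes the learner's
// output on demand for each requested row; e.g., assuming 'data' is a VMat
// and the learner is trained:
//
//     VMat out = learner->processDataSet(data);
//     Vec row(out.width());
//     out->getRow(0, row);   // the computed output for row 0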


TVec<string> PLearner::getOutputNames() const
{
    int n = outputsize();
    TVec<string> outnames(n);
    for(int k=0; k<n; k++)
        outnames[k] = "out" + tostring(k);
    return outnames;
}

////////////////
// useOnTrain //
////////////////
void PLearner::useOnTrain(Mat& outputs) const {
    outputs.resize(train_set.length(), outputsize());
    VMat train_output(outputs);
    use(train_set, train_output);
}

Mat PLearner::remote_useOnTrain() const
{
    Mat outputs;
    useOnTrain(outputs);
    return outputs;
}

//////////
// test //
//////////
void PLearner::test(VMat testset, PP<VecStatsCollector> test_stats,
                    VMat testoutputs, VMat testcosts) const
{

    Profiler::pl_profile_start("PLearner::test");

    int len = testset.length();
    Vec input;
    Vec target;
    real weight;
    int out_size = outputsize() >= 0 ? outputsize() : 0;

    Vec output(out_size);
    Vec costs(nTestCosts());

    if (test_stats) {
        // Set names of test_stats costs
        test_stats->setFieldNames(getTestCostNames());

        if (len == 0) {
            // Empty test set: we give -1 cost arbitrarily.
            costs.fill(-1);
            test_stats->update(costs);
        }
    }

    PP<ProgressBar> pb;
    if (report_progress)
        pb = new ProgressBar("Testing learner", len);

    PP<PRandom> copy_random_gen=0;
    if (use_a_separate_random_generator_for_testing && random_gen)
    {
        CopiesMap copies;
        copy_random_gen = random_gen->deepCopy(copies);
        random_gen->manual_seed(use_a_separate_random_generator_for_testing);
    }

    PLearnService& service(PLearnService::instance());

    //DUMMY: need to find a better way to calc. nservers -xsm
    const int chunksize= 2500;//nb. rows in each chunk sent to a remote server
    const int chunks_per_server= 3;//ideal nb. chunks per server
    int nservers= min(len/(chunks_per_server*chunksize), service.availableServers());

    if(nservers > 1 && parallelize_here && !isStatefulLearner())
    {// parallel test
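        // Overview of the master/slave protocol below (derived from the
        // numbered step comments): (1) each reserved server receives a copy
        // of this learner and replies with an object id; (2) chunks of the
        // testset are dispatched via the remote 'sub_test' method, and each
        // returned chunk result is merged into the global stats in chunk
        // order; (3) once all chunks are dispatched, the remote learner is
        // deleted; (4) each server is freed after its last result arrives.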
        CopiesMap copies;
        PP<VecStatsCollector> template_vsc= test_stats? test_stats->deepCopy(copies) : 0;
        TVec<PP<RemotePLearnServer> > servers= service.reserveServers(nservers);
        nservers= servers.length();
        int curpos= 0;
        int chunks_called= 0;
        int last_chunknum= -1;
        map<PP<RemotePLearnServer>, int> learners_ids;
        map<PP<RemotePLearnServer>, int> chunknums;
        map<int, PP<VecStatsCollector> > vscs;
        map<PP<RemotePLearnServer>, int> chunkszs;
        int rowsdone= 0;

        bool rep_prog= report_progress;
        const_cast<bool&>(report_progress)= false;//servers don't report progress
        for(int i= 0; i < nservers; ++i)
            servers[i]->newObjectAsync(*this);
        const_cast<bool&>(report_progress)= rep_prog;

        while(nservers > 0)
        {
            PP<RemotePLearnServer> s= service.waitForResult();
            if(learners_ids.find(s) == learners_ids.end())
            {
                if(curpos < len) // get learner id and send first chunk to process
                {
                    /* step 1 (once per slave) */
                    int id;
                    s->getResults(id);
                    learners_ids[s]= id;
                    int clen= min(chunksize, testset.length()-curpos);
                    chunkszs[s]= clen;
                    VMat sts= new RowsSubVMatrix(testset, curpos, clen);
                    if(master_sends_testset_rows)
                        sts= new MemoryVMatrix(sts.toMat());
                    else
                    {
                        // send testset once and for all, put it in object map of remote server
                        int tsid= s->newObject(*testset);
                        s->link(tsid, testset);
                    }
                    curpos+= clen;
                    s->callMethod(id, "sub_test", sts, template_vsc,
                                  static_cast<bool>(testoutputs), static_cast<bool>(testcosts));
                    chunknums[s]= chunks_called;
                    ++chunks_called;
                }
                else // all chunks processed, free server
                {
                    /* step 4 (once per slave) */
                    s->getResults(); // learner deleted
                    s->unlink(testset);
                    service.freeServer(s);
                    --nservers;
                }
            }
            else // get chunk result
            {
                PP<VecStatsCollector> vsc;
                VMat chunkout, chunkcosts;

                s->getResults(vsc, chunkout, chunkcosts);

                rowsdone+= chunkszs[s];
                if(report_progress) pb->update(rowsdone);

                int chunknum= chunknums[s];
                if(curpos < len) // more chunks to do, assign one to this server
                {
                    /* step 2 (repeat as needed) */
                    int clen= min(chunksize, testset.length()-curpos);
                    chunkszs[s]= clen;
                    VMat sts= new RowsSubVMatrix(testset, curpos, clen);
                    if(master_sends_testset_rows)
                        sts= new MemoryVMatrix(sts.toMat());
                    curpos+= clen;
                    s->callMethod(learners_ids[s], "sub_test", sts, template_vsc,
                                  static_cast<bool>(testoutputs), static_cast<bool>(testcosts));
                    chunknums[s]= chunks_called;
                    ++chunks_called;
                }
                else // all chunks processed, delete learner from server
                {
                    /* step 3 (once per slave) */
                    s->deleteObjectAsync(learners_ids[s]);
                    learners_ids.erase(s);
                }

                // now merge chunk results w/ global results
                if(test_stats)
                {
                    vscs[chunknum]= vsc;
                    map<int, PP<VecStatsCollector> >::iterator it= vscs.find(last_chunknum+1);
                    while(it != vscs.end())
                    {
                        ++last_chunknum;
                        test_stats->merge(*(it->second));
                        vscs.erase(it);
                        it= vscs.find(last_chunknum+1);
                    }
                }

                if(testoutputs)
                    for(int i= 0, j= chunknum*chunksize; i < chunksize && j < len; ++i, ++j)
                        testoutputs->forcePutRow(j, chunkout->getRowVec(i));
                if(testcosts)
                    for(int i= 0, j= chunknum*chunksize; i < chunksize && j < len; ++i, ++j)
                        testcosts->forcePutRow(j, chunkcosts->getRowVec(i));
            }
        }
    }
01129     else // Sequential test 
01130     {
01131         if (test_minibatch_size==1)
01132         {
01133             for (int i = 0; i < len; i++)
01134             {
01135                 testset.getExample(i, input, target, weight);
01136                 // Always call computeOutputAndCosts, since this is better
01137                 // behaved with stateful learners
01138                 computeOutputAndCosts(input,target,output,costs);
01139                 if (testoutputs) testoutputs->putOrAppendRow(i, output);
01140                 if (testcosts) testcosts->putOrAppendRow(i, costs);
01141                 if (test_stats) test_stats->update(costs, weight);
01142                 if (report_progress) pb->update(i);
01143             }
01144         } else
01145         {
01146             int n_batches = len/test_minibatch_size, i=0;
01147             b_inputs.resize(test_minibatch_size,inputsize());
01148             b_outputs.resize(test_minibatch_size, out_size);
01149             b_costs.resize(test_minibatch_size,costs.length());
01150             b_targets.resize(test_minibatch_size,targetsize());
01151             b_weights.resize(test_minibatch_size);
01152             for (int b=0;b<n_batches;b++,i+=test_minibatch_size)
01153             {
01154                 testset->getExamples(i,test_minibatch_size,b_inputs,b_targets,b_weights);
01155                 computeOutputsAndCosts(b_inputs,b_targets,b_outputs,b_costs);
01156                 for (int j=0;j<test_minibatch_size;j++)
01157                 {
01158                     if (testoutputs) testoutputs->putOrAppendRow(i+j, b_outputs(j));
01159                     if (testcosts) testcosts->putOrAppendRow(i+j, b_costs(j));
01160                     if (test_stats) test_stats->update(b_costs(j), b_weights[j]);
01161                     if (report_progress) pb->update(i+j);
01162                 }
01163             }
01164             if (i<len)
01165             {
01166                 b_inputs.resize(len-i,inputsize());
01167                 b_outputs.resize(len-i, out_size);
01168                 b_costs.resize(len-i,costs.length());
01169                 b_targets.resize(len-i,targetsize());
01170                 b_weights.resize(len-i);
01171                 testset->getExamples(i,len-i,b_inputs,b_targets,b_weights);
01172                 computeOutputsAndCosts(b_inputs,b_targets,b_outputs,b_costs);
01173                 for (int j=0;j<len-i;j++)
01174                 {
01175                     if (testoutputs) testoutputs->putOrAppendRow(i+j, b_outputs(j));
01176                     if (testcosts) testcosts->putOrAppendRow(i+j, b_costs(j));
01177                     if (test_stats) test_stats->update(b_costs(j), b_weights[j]);
01178                     if (report_progress) pb->update(i+j);
01179                 }
01180             }
01181         }
01182     }
01183 
01184     if (use_a_separate_random_generator_for_testing && random_gen)
01185         *random_gen = *copy_random_gen;
01186 
01187     Profiler::pl_profile_end("PLearner::test");
01188 
01189 }
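
// Minimal usage sketch for test(). The names 'learner' and 'testset' are
// illustrative assumptions, not defined in this file; any trained learner
// and VMat test set would do.
static void example_test_usage(PP<PLearner> learner, VMat testset)
{
    PP<VecStatsCollector> st = new VecStatsCollector();
    VMat outputs = new MemoryVMatrix(testset.length(), learner->outputsize());
    VMat costs   = new MemoryVMatrix(testset.length(), learner->nTestCosts());
    learner->test(testset, st, outputs, costs);
    st->finalize();  // per-cost statistics are now available from 'st'
}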
01190 
01191 void PLearner::computeOutput(const Vec& input, Vec& output) const
01192 {
01193     PLERROR("PLearner::computeOutput(Vec,Vec) not implemented in subclass %s\n",classname().c_str());
01194 }
01195 void PLearner::computeOutputs(const Mat& input, Mat& output) const
01196 {
01197     // Inefficient default implementation: computes outputs one row at a time.
01198     int n=input.length();
01199     PLASSERT(output.length()==n);
01200     for (int i=0;i<n;i++)
01201     {
01202         Vec in_i = input(i);
01203         Vec out_i = output(i); 
01204         computeOutput(in_i,out_i);
01205     }
01206 }
01207 void PLearner::computeOutputsAndCosts(const Mat& input, const Mat& target, 
01208                                       Mat& output, Mat& costs) const
01209 {
01210     // Inefficient default implementation: calls computeOutputAndCosts row by row.
01211     int n=input.length();
01212     PLASSERT(target.length()==n);
01213     output.resize(n,outputsize());
01214     costs.resize(n,nTestCosts());
01215     for (int i=0;i<n;i++)
01216     {
01217         Vec in_i = input(i);
01218         Vec out_i = output(i); 
01219         Vec target_i = target(i);
01220         Vec c_i = costs(i);
01221         computeOutputAndCosts(in_i,target_i,out_i,c_i);
01222     }
01223 }
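
// Sketch of calling the batch interface directly; note that the output and
// costs matrices are resized by computeOutputsAndCosts itself. 'learner',
// 'inputs' and 'targets' are illustrative assumptions:
static void example_batch_compute(PP<PLearner> learner,
                                  const Mat& inputs, const Mat& targets)
{
    Mat outputs, costs;
    learner->computeOutputsAndCosts(inputs, targets, outputs, costs);
    // outputs: inputs.length() x outputsize(); costs: inputs.length() x nTestCosts()
}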
01224 
01225 
01226 //////////////////////////////////////////////////////////////////////////////////////////////
01227 // sub_test: used by the parallel test ('remote' version, which returns a tuple of results) //
01228 //////////////////////////////////////////////////////////////////////////////////////////////
01229 tuple<PP<VecStatsCollector>, VMat, VMat> PLearner::sub_test(VMat testset, PP<VecStatsCollector> test_stats, bool rtestoutputs, bool rtestcosts) const
01230 {
01231     VMat testoutputs= 0;
01232     VMat testcosts= 0;
01233     int outsize= outputsize();
01234     int costsize= nTestCosts();
01235     int len= testset.length();
01236     if(rtestoutputs) testoutputs= new MemoryVMatrix(len, outsize);
01237     if(rtestcosts) testcosts= new MemoryVMatrix(len, costsize);
01238     if(test_stats)
01239     {
01240         if(test_stats->maxnvalues > 0) test_stats->maxnvalues= -1; // no limit: keep all value counts within the chunk
01241         if(test_stats->m_window == -1 || test_stats->m_window > 0)
01242             test_stats->setWindowSize(-2); // -2: keep all observations within the chunk
01243     }
01244     test(testset, test_stats, testoutputs, testcosts);
01245     return make_tuple(test_stats, testoutputs, testcosts);
01246 }
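
// Sketch of consuming the tuple returned by sub_test() for one chunk of a
// larger test set (boost-style tuple accessors; 'learner' and 'chunk' are
// illustrative assumptions):
static void example_sub_test(PP<PLearner> learner, VMat chunk)
{
    tuple<PP<VecStatsCollector>, VMat, VMat> res =
        learner->sub_test(chunk, new VecStatsCollector(), true, true);
    PP<VecStatsCollector> st = get<0>(res);  // stats over this chunk only
    VMat outs  = get<1>(res);                // per-row outputs for the chunk
    VMat costs = get<2>(res);                // per-row test costs for the chunk
    // The parallel test() merges these per-chunk results back together.
}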
01247 
01248 
01249 ///////////////////////////////
01250 // remote interface for test //
01251 ///////////////////////////////
01252 tuple<PP<VecStatsCollector>, VMat, VMat> PLearner::remote_test(VMat testset, PP<VecStatsCollector> test_stats, bool rtestoutputs, bool rtestcosts) const
01253 {
01254     VMat testoutputs= 0;
01255     VMat testcosts= 0;
01256     int outsize= outputsize();
01257     if (outsize < 0)
01258         // Negative outputsize: the output will be empty to avoid a crash.
01259         outsize = 0;
01260     int costsize= nTestCosts();
01261     int len= testset.length();
01262     if(rtestoutputs) testoutputs= new MemoryVMatrix(len, outsize);
01263     if(rtestcosts) testcosts= new MemoryVMatrix(len, costsize);
01264     test(testset, test_stats, testoutputs, testcosts);
01265     return make_tuple(test_stats, testoutputs, testcosts);
01266 }
01267 
01268 ///////////////
01269 // initTrain //
01270 ///////////////
01271 bool PLearner::initTrain()
01272 {
01273     string warn_msg = "In PLearner::initTrain (called by '" +
01274         this->classname() + "') - ";
01275 
01276     // Check 'nstages' is valid.
01277     if (nstages < 0) {
01278         PLWARNING((warn_msg + "Option nstages (set to " + tostring(nstages)
01279                     + ") must be non-negative").c_str());
01280         return false;
01281     }
01282 
01283     // Check we actually need to train.
01284     if (stage == nstages) {
01285         if (verbosity >= 1)
01286             PLWARNING((warn_msg + "The learner is already trained").c_str());
01287         return false;
01288     }
01289 
01290     if (stage > nstages) {
01291         if (verbosity >= 1) {
01292             string msg = warn_msg + "Learner was already trained up to stage "
01293                 + tostring(stage) + ", but asked to train up to nstages="
01294                 + tostring(nstages) + ": it will be reverted to stage 0 and "
01295                                       "trained again";
01296             PLWARNING(msg.c_str());
01297         }
01298         forget();
01299     }
01300 
01301     // Check there is a training set.
01302     if (!train_set) {
01303         if (verbosity >= 1)
01304             PLWARNING((warn_msg + "No training set specified").c_str());
01305         return false;
01306     }
01307 
01308     // Initialize train_stats if needed.
01309     if (!train_stats)
01310         train_stats = new VecStatsCollector();
01311 
01312     // Meta learners may need to set the stats_collector of their sub-learners
01313     setTrainStatsCollector(train_stats);
01314 
01315     // Everything is fine.
01316     return true;
01317 }
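
// Sketch of the canonical train() pattern that initTrain() is designed to
// support ('MyLearner' is a hypothetical subclass, shown for illustration):
//
//     void MyLearner::train()
//     {
//         if (!initTrain())
//             return;              // invalid setup, or nothing left to do
//         while (stage < nstages) {
//             // ... one training pass over train_set, updating train_stats ...
//             ++stage;
//         }
//         train_stats->finalize();
//     }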
01318 
01319 ////////////////////////
01320 // resetInternalState //
01321 ////////////////////////
01322 void PLearner::resetInternalState()
01323 { }
01324 
01325 bool PLearner::isStatefulLearner() const
01326 {
01327     return false;
01328 }
01329 
01330 
01331 //#####  computeInputOutputMat  ###############################################
01332 
01333 Mat PLearner::computeInputOutputMat(VMat inputs) const
01334 {
01335     int l = inputs.length();
01336     int nin = inputsize();
01337     int nout = outputsize();
01338     Mat m(l, nin+nout);
01339     for(int i=0; i<l; i++)
01340     {
01341         Vec v = m(i);
01342         Vec invec = v.subVec(0,nin);
01343         Vec outvec = v.subVec(nin,nout);
01344         inputs->getRow(i, invec);
01345         computeOutput(invec, outvec);
01346     }
01347     return m;
01348 }
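
// Sketch of reading back the result: row i holds the input in columns
// [0, inputsize()) followed by the output in columns
// [inputsize(), inputsize()+outputsize()). 'learner' and 'data' are
// illustrative, and the subMatColumns slice assumes PLearn's usual TMat API:
static Mat example_outputs_only(PP<PLearner> learner, VMat data)
{
    Mat m = learner->computeInputOutputMat(data);
    return m.subMatColumns(learner->inputsize(), learner->outputsize());
}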
01349 
01350 
01351 //#####  computeInputOutputConfMat  ###########################################
01352 
01353 Mat PLearner::computeInputOutputConfMat(VMat inputs, real probability) const
01354 {
01355     int l = inputs.length();
01356     int nin = inputsize();
01357     int nout = outputsize();
01358     Mat m(l, nin+3*nout);
01359     TVec< pair<real,real> > intervals;
01360     for(int i=0; i<l; i++)
01361     {
01362         Vec v = m(i);
01363         Vec invec   = v.subVec(0,nin);
01364         Vec outvec  = v.subVec(nin,nout);
01365         Vec lowconf = v.subVec(nin+nout, nout);
01366         Vec hiconf  = v.subVec(nin+2*nout, nout);
01367         inputs->getRow(i, invec);
01368         computeOutput(invec, outvec);
01369         bool conf_avail = computeConfidenceFromOutput(invec, outvec,
01370                                                       probability, intervals);
01371         if (conf_avail) {
01372             for (int j=0, n=intervals.size() ; j<n ; ++j) {
01373                 lowconf[j] = intervals[j].first;
01374                 hiconf[j]  = intervals[j].second;
01375             }
01376         }
01377         else {
01378             lowconf << MISSING_VALUE;
01379             hiconf  << MISSING_VALUE;
01380         }
01381     }
01382     return m;
01383 }
01384 
01385 
01386 //#####  computeOutputConfMat  ################################################
01387 
01388 Mat PLearner::computeOutputConfMat(VMat inputs, real probability) const
01389 {
01390     int l = inputs.length();
01391     int nin = inputsize();
01392     int nout = outputsize();
01393     Mat m(l, 3*nout);
01394     TVec< pair<real,real> > intervals;
01395     Vec invec(nin);
01396     for(int i=0; i<l; i++)
01397     {
01398         Vec v = m(i);
01399         Vec outvec  = v.subVec(0, nout);
01400         Vec lowconf = v.subVec(nout, nout);
01401         Vec hiconf  = v.subVec(2*nout, nout);
01402         inputs->getRow(i, invec);
01403         computeOutput(invec, outvec);
01404         bool conf_avail = computeConfidenceFromOutput(invec, outvec,
01405                                                       probability, intervals);
01406         if (conf_avail) {
01407             for (int j=0, n=intervals.size() ; j<n ; ++j) {
01408                 lowconf[j] = intervals[j].first;
01409                 hiconf[j]  = intervals[j].second;
01410             }
01411         }
01412         else {
01413             lowconf << MISSING_VALUE;
01414             hiconf  << MISSING_VALUE;
01415         }
01416     }
01417     return m;
01418 }
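
// Sketch of unpacking one row of the matrix returned by computeOutputConfMat;
// the layout is [ output | low | high ], each block of width outputsize()
// (computeInputOutputConfMat additionally prepends the input columns).
// 'learner', 'm' and 'i' are illustrative assumptions:
static void example_read_conf_row(PP<PLearner> learner, Mat m, int i)
{
    int nout   = learner->outputsize();
    Vec row    = m(i);
    Vec output = row.subVec(0, nout);
    Vec low    = row.subVec(nout, nout);
    Vec high   = row.subVec(2*nout, nout);
    // low and high hold MISSING_VALUE when no confidence interval is available
}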
01419 
01420 
01421 //////////////////////////
01422 // remote_computeOutput //
01423 //////////////////////////
01425 Vec PLearner::remote_computeOutput(const Vec& input) const
01426 {
01427     int os = outputsize();
01428     tmp_output.resize(os >= 0 ? os : 0);
01429     computeOutput(input, tmp_output);
01430     return tmp_output;
01431 }
01432 
01433 ///////////////////////////
01434 // remote_computeOutputs //
01435 ///////////////////////////
01436 Mat PLearner::remote_computeOutputs(const Mat& input) const
01437 {
01438     Mat out(input.length(), outputsize() >= 0 ? outputsize() : 0);
01439     computeOutputs(input, out);
01440     return out;
01441 }
01442 
01443 ///////////////////////////////////
01444 // remote_computeOutputsAndCosts //
01445 ///////////////////////////////////
01446 pair<Mat, Mat> PLearner::remote_computeOutputsAndCosts(const Mat& input,
01447                                                        const Mat& target) const
01448 {
01449     Mat output, cost;
01450     computeOutputsAndCosts(input, target, output, cost);
01451     return pair<Mat, Mat>(output, cost);
01452 }
01453 
01454 ////////////////
01455 // remote_use //
01456 ////////////////
01458 void PLearner::remote_use(VMat inputs, string output_fname) const
01459 {
01460     VMat outputs = new FileVMatrix(output_fname, inputs.length(), outputsize());
01461     use(inputs,outputs);
01462 }
01463 
01465 Mat PLearner::remote_use2(VMat inputs) const
01466 {
01467     Mat outputs(inputs.length(), outputsize());
01468     use(inputs,outputs);
01469     return outputs;
01470 }
01471 
01473 
01474 tuple<Vec,Vec> PLearner::remote_computeOutputAndCosts(const Vec& input, const Vec& target) const
01475 {
01476     tmp_output.resize(outputsize());
01477     Vec costs(nTestCosts());
01478     computeOutputAndCosts(input,target,tmp_output,costs);
01479     return make_tuple(tmp_output, costs);
01480 }
01481 
01483 Vec PLearner::remote_computeCostsFromOutputs(const Vec& input, const Vec& output,
01484                                              const Vec& target) const
01485 {
01486     Vec costs(nTestCosts());
01487     computeCostsFromOutputs(input,output,target,costs);
01488     return costs;
01489 }
01490 
01492 Vec PLearner::remote_computeCostsOnly(const Vec& input, const Vec& target) const
01493 {
01494     Vec costs(nTestCosts());
01495     computeCostsOnly(input,target,costs);
01496     return costs;
01497 }
01498 
01500 TVec< pair<real,real> >
01501 PLearner::remote_computeConfidenceFromOutput(const Vec& input, const Vec& output,
01502                                              real probability) const
01503 {
01504     TVec< pair<real,real> > intervals(output.length());
01505     bool ok = computeConfidenceFromOutput(input, output, probability, intervals);
01506     if (ok)
01507         return intervals;
01508     else
01509         return TVec< pair<real,real> >();
01510 }
01511 
01513 tuple<Mat, TVec<Mat> >
01514 PLearner::remote_computeOutputCovMat(const Mat& inputs) const
01515 {
01516     Mat outputs;
01517     TVec<Mat> covmat;
01518     computeOutputCovMat(inputs, outputs, covmat);
01519     return make_tuple(outputs, covmat);
01520 }
01521 
01523 void PLearner::remote_batchComputeOutputAndConfidence(VMat inputs, real probability,
01524                                                       string pmat_fname) const
01525 {
01526     TVec<string> fieldnames;
01527     for(int j=0; j<outputsize(); j++)
01528     {
01529         fieldnames.append("output_"+tostring(j));
01530         fieldnames.append("low_"+tostring(j));
01531         fieldnames.append("high_"+tostring(j));
01532     }
01533     VMat out_and_conf = new FileVMatrix(pmat_fname,inputs.length(),fieldnames);
01534     batchComputeOutputAndConfidence(inputs, probability, out_and_conf);
01535 }
01536 
01537 
01538 } // end of namespace PLearn
01539 
01540 
01541 /*
01542   Local Variables:
01543   mode:c++
01544   c-basic-offset:4
01545   c-file-style:"stroustrup"
01546   c-file-offsets:((innamespace . 0)(inline-open . 0))
01547   indent-tabs-mode:nil
01548   fill-column:79
01549   End:
01550 */
01551 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :