// -*- C++ -*-

// VPLPreprocessedLearner2.cc
//
// Copyright (C) 2005, 2006 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: VPLPreprocessedLearner2.cc 5480 2006-05-03 18:57:39Z plearner $
 ******************************************************* */

// Authors: Pascal Vincent

#include "VPLPreprocessedLearner2.h"
#include <plearn/vmat/ProcessingVMatrix.h>
#include <plearn/vmat/FilteredVMatrix.h>
#include <plearn/base/tostring.h>

namespace PLearn {
using namespace std;

VPLPreprocessedLearner2::VPLPreprocessedLearner2()
    :orig_inputsize(-1),
     orig_targetsize(-1),
     use_filtering_prg_for_repeat(false),
     repeat_id_field_name(""),
     repeat_count_field_name(""),
     ignore_test_costs(false)
{
}

PLEARN_IMPLEMENT_OBJECT(
    VPLPreprocessedLearner2,
    "Learner whose training set, inputs and outputs can be pre/post-processed by VPL code",
    "See VMatLanguage for the definition of the allowed VPL syntax."
    );

void VPLPreprocessedLearner2::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag that can be
    // ### combined with these is OptionBase::nosave.

    // ### ex:
    // declareOption(ol, "myoption", &VPLPreprocessedLearner2::myoption, OptionBase::buildoption,
    //               "Help text describing this option");
    // ...

    declareOption(ol, "learner", &VPLPreprocessedLearner2::learner_,
                  OptionBase::buildoption,
                  "The embedded learner");

    declareOption(ol, "filtering_prg", &VPLPreprocessedLearner2::filtering_prg, OptionBase::buildoption,
                  "Optional program string in VPL language to apply as filtering on the training VMat.\n"
                  "It is the resulting filtered training set that is passed to the underlying learner.\n"
                  "This program must produce a single value interpreted as a boolean: only the rows for which\n"
                  "it evaluates to non-zero will be kept.\n"
                  "An empty string means NO FILTERING.");
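
    // A hypothetical illustration (not part of the original source): assuming
    // the postfix VPL syntax documented in VMatLanguage, where @fieldname
    // pushes the value of a field, a program keeping only the rows whose
    // "age" field exceeds 18 might look like:
    //
    //     filtering_prg = "@age 18 >"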

    declareOption(ol, "input_prg", &VPLPreprocessedLearner2::input_prg, OptionBase::buildoption,
                  "Program string in VPL language to be applied to each raw input\n"
                  "to generate the new preprocessed input.\n"
                  "Note that names must be given to the generated values with :fieldname VPL syntax.\n"
                  "An empty string means NO PREPROCESSING (the initial raw input is used as is).");
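
    // A hypothetical sketch (not from the original source), again assuming
    // VMatLanguage's postfix syntax: a preprocessing program that passes
    // field "x1" through unchanged and adds the sum of "x1" and "x2" as a
    // second input, naming each generated value with :fieldname, might read:
    //
    //     input_prg = "@x1 :x1  @x1 @x2 + :x1_plus_x2"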

    declareOption(ol, "target_prg", &VPLPreprocessedLearner2::target_prg, OptionBase::buildoption,
                  "Program string in VPL language to be applied to a dataset row\n"
                  "to generate a proper target for the underlying learner.\n"
                  "Note that names must be given to the generated values with :fieldname VPL syntax.\n"
                  "If it's an empty string, then we'll use the original target from the data set.");

    declareOption(ol, "weight_prg", &VPLPreprocessedLearner2::weight_prg, OptionBase::buildoption,
                  "Program string in VPL language to be applied to a dataset row\n"
                  "to generate a proper weight for the underlying learner.\n"
                  "Note that names must be given to the generated values with :fieldname VPL syntax.\n"
                  "If it's an empty string, then we'll use the original weight from the data set.");

    declareOption(ol, "extra_prg", &VPLPreprocessedLearner2::extra_prg, OptionBase::buildoption,
                  "Program string in VPL language to be applied to a dataset row\n"
                  "to generate proper extra fields for the underlying learner.\n"
                  "Note that names must be given to the generated values with :fieldname VPL syntax.\n"
                  "If it's an empty string, then we'll use the original extra fields from the data set.");

    declareOption(ol, "output_prg", &VPLPreprocessedLearner2::output_prg, OptionBase::buildoption,
                  "Program string in VPL language to obtain the postprocessed output\n"
                  "from a concatenation of the raw input fields and the underlying learner's outputs.\n"
                  "The underlying learner's outputs are typically named out0, out1, out2, ...\n"
                  "Note that output names must be given to the generated values with :fieldname VPL syntax.\n"
                  "If it's an empty string, then we'll output the underlying learner's outputs.\n");
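
    // A hypothetical sketch (not from the original source): the source row
    // seen by this program is the raw input followed by the underlying
    // learner's outputs (out0, out1, ...), so thresholding out0 at 0.5 to
    // produce a single postprocessed output named "class" might read:
    //
    //     output_prg = "@out0 0.5 > :class"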

    declareOption(ol, "costs_prg", &VPLPreprocessedLearner2::costs_prg, OptionBase::buildoption,
                  "Program string in VPL language to obtain postprocessed test costs\n"
                  "from a concatenation of the raw input fields and target fields,\n"
                  "and the underlying learner's outputs and test costs.\n"
                  "Note that names must be given to the generated values with :fieldname VPL syntax.\n"
                  "If it's an empty string, then we'll output the underlying learner's test costs.\n"
                  "Note that this processing is only applied to test costs, not to train costs, which are returned as is.");
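
    // A hypothetical sketch (not from the original source): here the source
    // row is raw input, target, learner outputs, then learner test costs, so
    // a program taking the square root of a cost named "mse" (assuming a
    // sqrt operator exists in VPL) and renaming it might read:
    //
    //     costs_prg = "@mse sqrt :rmse"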

    declareOption(ol, "orig_fieldnames", &VPLPreprocessedLearner2::orig_fieldnames, OptionBase::learntoption,
                  "original fieldnames of the training set");
    declareOption(ol, "orig_inputsize", &VPLPreprocessedLearner2::orig_inputsize, OptionBase::learntoption,
                  "original inputsize of the training set");
    declareOption(ol, "orig_targetsize", &VPLPreprocessedLearner2::orig_targetsize, OptionBase::learntoption,
                  "original targetsize of the training set");

    declareOption(ol, "use_filtering_prg_for_repeat", &VPLPreprocessedLearner2::use_filtering_prg_for_repeat, OptionBase::buildoption,
                  "When true, the result of the filtering program indicates the number of times a row should be repeated (0..n).\n"
                  "(sets FilteredVMatrix::allow_repeat_rows.)");

    declareOption(ol, "repeat_id_field_name", &VPLPreprocessedLearner2::repeat_id_field_name, OptionBase::buildoption,
                  "Field name for the repetition id (0, 1, ..., n-1).  No field is added if empty.");

    declareOption(ol, "repeat_count_field_name", &VPLPreprocessedLearner2::repeat_count_field_name, OptionBase::buildoption,
                  "Field name for the number of repetitions (n).  No field is added if empty.");

    declareOption(ol, "ignore_test_costs", &VPLPreprocessedLearner2::ignore_test_costs, OptionBase::buildoption,
                  "WARNING: THIS IS AN UGLY HACK!!\n"
                  "When set to true, computeOutputAndCosts will simply call computeOutput and return bogus costs.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void VPLPreprocessedLearner2::build_()
{
    if(train_set.isNull() && (orig_inputsize>0 || orig_targetsize>0) ) // we're probably reloading a saved VPLPreprocessedLearner2
    {
        initializeInputPrograms();
        initializeOutputPrograms();
    }
    else if(!costs_prg.empty())
        VMatLanguage::getOutputFieldNamesFromString(costs_prg, costs_prg_fieldnames);
}

// ### Nothing to add here, simply calls build_
void VPLPreprocessedLearner2::build()
{
    inherited::build();
    build_();
}

void VPLPreprocessedLearner2::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.

    deepCopyField(learner_, copies);

    input_prg_.makeDeepCopyFromShallowCopy(copies);
    target_prg_.makeDeepCopyFromShallowCopy(copies);
    weight_prg_.makeDeepCopyFromShallowCopy(copies);
    extra_prg_.makeDeepCopyFromShallowCopy(copies);
    output_prg_.makeDeepCopyFromShallowCopy(copies);
    costs_prg_.makeDeepCopyFromShallowCopy(copies);

    deepCopyField(input_prg_fieldnames, copies);
    deepCopyField(target_prg_fieldnames, copies);
    deepCopyField(weight_prg_fieldnames, copies);
    deepCopyField(extra_prg_fieldnames, copies);
    deepCopyField(output_prg_fieldnames, copies);
    deepCopyField(costs_prg_fieldnames, copies);
    deepCopyField(row, copies);
    deepCopyField(processed_input, copies);
    deepCopyField(processed_target, copies);
    deepCopyField(processed_weight, copies);
    deepCopyField(processed_extra, copies);
    deepCopyField(pre_output, copies);
    deepCopyField(pre_costs, copies);
}

void VPLPreprocessedLearner2::setValidationSet(VMat validset)
{
    PLASSERT( learner_ );
    inherited::setValidationSet(validset);
    learner_->setValidationSet(validset);
}

void VPLPreprocessedLearner2::setTrainStatsCollector(PP<VecStatsCollector> statscol)
{
    PLASSERT( learner_ );
    inherited::setTrainStatsCollector(statscol);
    learner_->setTrainStatsCollector(statscol);
}

int VPLPreprocessedLearner2::outputsize() const
{
    if(!output_prg.empty())
        return output_prg_fieldnames.length();
    else
    {
        PLASSERT( learner_ );
        return learner_->outputsize();
    }
}

void VPLPreprocessedLearner2::setExperimentDirectory(const PPath& the_expdir)
{
    PLASSERT( learner_ );
    inherited::setExperimentDirectory(the_expdir);
    learner_->setExperimentDirectory(the_expdir);
}

void VPLPreprocessedLearner2::forget()
{
    PLASSERT( learner_ );
    learner_->forget();
    stage = 0;
}

void VPLPreprocessedLearner2::train()
{
    PLASSERT( learner_ );
    learner_->train();
    stage = learner_->stage;
}

void VPLPreprocessedLearner2::initializeInputPrograms()
{
    if(!input_prg.empty())
    {
        input_prg_.setSourceFieldNames(orig_fieldnames.subVec(0,orig_inputsize));
        input_prg_.compileString(input_prg, input_prg_fieldnames);
    }
    else
    {
        input_prg_.clear();
        input_prg_fieldnames.resize(0);
    }

    if(!target_prg.empty() && !ignore_test_costs)
    {
        target_prg_.setSourceFieldNames(orig_fieldnames);
        target_prg_.compileString(target_prg, target_prg_fieldnames);
    }
    else
    {
        target_prg_.clear();
        target_prg_fieldnames.resize(0);
    }

    if(!weight_prg.empty() && !ignore_test_costs)
    {
        weight_prg_.setSourceFieldNames(orig_fieldnames);
        weight_prg_.compileString(weight_prg, weight_prg_fieldnames);
    }
    else
    {
        weight_prg_.clear();
        weight_prg_fieldnames.resize(0);
    }

    if(!extra_prg.empty())
    {
        extra_prg_.setSourceFieldNames(orig_fieldnames);
        extra_prg_.compileString(extra_prg, extra_prg_fieldnames);
    }
    else
    {
        extra_prg_.clear();
        extra_prg_fieldnames.resize(0);
    }
}

void VPLPreprocessedLearner2::initializeOutputPrograms()
{
    TVec<string> orig_input_fieldnames = orig_fieldnames.subVec(0,orig_inputsize);
    TVec<string> orig_target_fieldnames = orig_fieldnames.subVec(orig_inputsize, orig_targetsize);

    if(!output_prg.empty())
    {
        output_prg_.setSourceFieldNames(concat(orig_input_fieldnames,learner_->getOutputNames()) );
        output_prg_.compileString(output_prg, output_prg_fieldnames);
    }
    else
    {
        output_prg_.clear();
        output_prg_fieldnames.resize(0);
    }

    if(!costs_prg.empty())
    {
        costs_prg_.setSourceFieldNames(concat(orig_input_fieldnames,orig_target_fieldnames,learner_->getOutputNames(),learner_->getTestCostNames()) );
        costs_prg_.compileString(costs_prg, costs_prg_fieldnames);
    }
    else
    {
        costs_prg_.clear();
        costs_prg_fieldnames.resize(0);
    }
}

void VPLPreprocessedLearner2::setTrainingSet(VMat training_set, bool call_forget)
{
    PLASSERT( learner_ );

    bool training_set_has_changed = !train_set || !(train_set->looksTheSameAs(training_set));
    if (call_forget && !training_set_has_changed)
        // In this case, learner_->build() will not have been called, which may
        // cause trouble if it updates data from the training set.
        learner_->build();

    orig_fieldnames = training_set->fieldNames();
    orig_inputsize  = training_set->inputsize();
    orig_targetsize = training_set->targetsize();
    initializeInputPrograms();

    VMat filtered_trainset = training_set;
    PPath filtered_trainset_metadatadir = getExperimentDirectory() / "filtered_train_set.metadata";
    if(!filtering_prg.empty())
        filtered_trainset = new FilteredVMatrix(training_set, filtering_prg, filtered_trainset_metadatadir, verbosity>1,
                                                use_filtering_prg_for_repeat, repeat_id_field_name, repeat_count_field_name);

    VMat processed_trainset = new ProcessingVMatrix(filtered_trainset, input_prg, target_prg, weight_prg, extra_prg);
    learner_->setTrainingSet(processed_trainset, false);
    inherited::setTrainingSet(training_set, call_forget); // will call forget if needed

    initializeOutputPrograms();
}
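
// A minimal usage sketch (not part of the original source; the embedded
// learner and field names are invented for illustration, and the option
// syntax assumes the usual PLearn serialization format). Training rows are
// first filtered by filtering_prg, then split by the input/target programs
// before reaching the embedded learner:
//
//     VPLPreprocessedLearner2(
//         learner = LinearRegressor();
//         filtering_prg = "@age 18 >";
//         input_prg = "@x1 :x1  @x2 :x2";
//         target_prg = "@y :y";
//     )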

/*
void VPLPreprocessedLearner2::test(VMat testset, PP<VecStatsCollector> test_stats, VMat testoutputs, VMat testcosts) const
{

    inherited::test(testset, test_stats, testoutputs, testcosts);
*/
/*
    VMat filtered_testset = testset;
    PPath filtered_testset_metadatadir = getExperimentDirectory() / "filtered_test_set.metadata";

    // DO NOT FILTER THE TESTSET
    //if(!filtering_prg.empty())
        //filtered_testset = new FilteredVMatrix(testset, filtering_prg, filtered_testset_metadatadir, verbosity>1);

    VMat processed_testset = new ProcessingVMatrix(filtered_testset, input_prg, target_prg, weight_prg, extra_prg);

    int l = processed_testset.length();
    Vec input;
    Vec target;
    real weight;
    Vec proc_input;
    Vec proc_target;
    real proc_weight;

    Vec output(outputsize());

    Vec costs(nTestCosts());

    // testset->defineSizes(inputsize(),targetsize(),weightsize());

    PP<ProgressBar> pb;
    if(report_progress)
        pb = new ProgressBar("Testing learner",l);

    if (l == 0) {
        // Empty test set: we give -1 cost arbitrarily.
        costs.fill(-1);
        test_stats->update(costs);
    }

    perr << "VPLPreprocessedLearner2::test class=" << this->classname()
         << "\tl=" << l
         << "\tinputsize=" << processed_testset->inputsize()
         << "\ttargetsize=" << processed_testset->targetsize()
         << "\tweightsize=" << processed_testset->weightsize()
         << endl;

    for(int i=0; i<l; i++)
    {
        processed_testset.getExample(i, proc_input, proc_target, proc_weight);
        filtered_testset.getExample(i, input, target, weight);

        // Always call computeOutputAndCosts, since this is better
        // behaved with stateful learners
        pre_costs.resize(learner_->nTestCosts());
        learner_->computeOutputAndCosts(proc_input,proc_target,pre_output,pre_costs);

        if(!output_prg.empty())
            output_prg_.run(concat(input,pre_output), output);
        else
            output << pre_output;

        if(!costs_prg.empty())
            costs_prg_.run(concat(input,target,pre_output,pre_costs), costs);
        else
            costs << pre_costs;

        if(testoutputs)
            testoutputs->putOrAppendRow(i,output);

        if(testcosts)
            testcosts->putOrAppendRow(i, costs);

        if(test_stats)
            test_stats->update(costs,proc_weight);

        if(report_progress)
            pb->update(i);
    }
*/
/*
}
*/

void VPLPreprocessedLearner2::computeOutput(const Vec& input, Vec& output) const
{
    PLASSERT( learner_ );
    output.resize(outputsize());
    Vec newinput = input;
    if(!input_prg.empty())
    {
        processed_input.resize(input_prg_fieldnames.length());
        input_prg_.run(input, processed_input);
        newinput = processed_input;
    }

    if(!output_prg.empty())
    {
        learner_->computeOutput(newinput, pre_output);
        // the raw input is prepended as context for the output postprocessing
        output_prg_.run(concat(input,pre_output), output);
    }
    else
        learner_->computeOutput(newinput, output);
}

void VPLPreprocessedLearner2::computeOutputAndCosts(const Vec& input, const Vec& target,
                                                    Vec& output, Vec& costs) const
{
    output.resize(outputsize());
    costs.resize(nTestCosts());

    if(ignore_test_costs)
    {
        costs.fill(-1);
        computeOutput(input, output);
        return;
    }

    PLASSERT( learner_ );
    PLASSERT(input.length()==inputsize());
    PLASSERT(target.length()==targetsize());

    Vec newinput = input;
    if(!input_prg.empty())
    {
        processed_input.resize(input_prg_fieldnames.length());
        input_prg_.run(input, processed_input);
        newinput = processed_input;
    }

    // Rebuild a row of the full original width (input and target fields,
    // resized to the original fieldnames length) as source for target_prg_.
    Vec orig_row = concat(input,target);
    orig_row.resize(orig_fieldnames.length());

    Vec newtarget = target;
    if(!target_prg.empty())
    {
        processed_target.resize(target_prg_fieldnames.length());
        target_prg_.run(orig_row, processed_target);
        newtarget = processed_target;
    }

    pre_costs.resize(learner_->nTestCosts());
    learner_->computeOutputAndCosts(newinput, newtarget, pre_output, pre_costs);

    if(!output_prg.empty())
        output_prg_.run(concat(input,pre_output), output);
    else
        output << pre_output;

    if(!costs_prg.empty())
        costs_prg_.run(concat(input,target,pre_output,pre_costs), costs);
    else
        costs << pre_costs;
}

void VPLPreprocessedLearner2::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                      const Vec& target, Vec& costs) const
{
    Vec nonconst_output = output; // to make the constipated compiler happy
    computeOutputAndCosts(input, target, nonconst_output, costs);
}

bool VPLPreprocessedLearner2::computeConfidenceFromOutput(
    const Vec& input, const Vec& output,
    real probability, TVec< pair<real,real> >& intervals) const
{
    int d = outputsize();
    if(d!=output.length())
        PLERROR("In VPLPreprocessedLearner2::computeConfidenceFromOutput, length of passed output (%d) "
                "differs from outputsize (%d)!",output.length(),d);

    PLASSERT( learner_ );
    Vec newinput = input;
    if(!input_prg.empty())
    {
        processed_input.resize(input_prg_fieldnames.length());
        input_prg_.run(input, processed_input);
        newinput = processed_input;
    }

    bool status = false;
    if(output_prg.empty())
        status = learner_->computeConfidenceFromOutput(newinput, output, probability, intervals);
    else // must recompute the output of the underlying learner, and post-process the returned intervals
    {
        learner_->computeOutput(newinput, pre_output);
        TVec< pair<real,real> > pre_intervals;
        status = learner_->computeConfidenceFromOutput(newinput, pre_output, probability, pre_intervals);
        if(!status) // no confidence computation available
        {
            intervals.resize(d);
            for(int k=0; k<d; k++)
                intervals[k] = pair<real,real>(MISSING_VALUE,MISSING_VALUE);
        }
        else // postprocess low and high vectors
        {
            int ud = learner_->outputsize(); // dimension of underlying learner's output
            // first build low and high vectors
            Vec low(ud);
            Vec high(ud);
            for(int k=0; k<ud; k++)
            {
                pair<real,real> p = pre_intervals[k];
                low[k] = p.first;
                high[k] = p.second;
            }
            Vec post_low(d); // postprocessed low
            Vec post_high(d); // postprocessed high

            output_prg_.run(concat(input,low), post_low);
            output_prg_.run(concat(input,high), post_high);

            // Now copy post_low and post_high to intervals
            intervals.resize(d);
            for(int k=0; k<d; k++)
                intervals[k] = pair<real,real>(post_low[k],post_high[k]);
        }
    }
    return status;
}

TVec<string> VPLPreprocessedLearner2::getOutputNames() const
{
    if(!output_prg.empty())
        return output_prg_fieldnames;
    else
        return learner_->getOutputNames();
}

TVec<string> VPLPreprocessedLearner2::getTestCostNames() const
{
    if(!costs_prg.empty())
        return costs_prg_fieldnames;
    else
        return learner_->getTestCostNames();
}

TVec<string> VPLPreprocessedLearner2::getTrainCostNames() const
{
    PLASSERT( learner_ );
    return learner_->getTrainCostNames();
}

void VPLPreprocessedLearner2::resetInternalState()
{
    PLASSERT( learner_ );
    learner_->resetInternalState();
}

bool VPLPreprocessedLearner2::isStatefulLearner() const
{
    PLASSERT( learner_ );
    return learner_->isStatefulLearner();
}


} // end of namespace PLearn

/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :