// -*- C++ -*-

// VPLCombinedLearner.cc
//
// Copyright (C) 2005, 2006 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: VPLCombinedLearner.cc 5480 2006-05-03 18:57:39Z plearner $
 ******************************************************* */

// Authors: Pascal Vincent

#include "VPLCombinedLearner.h"
#include <plearn/vmat/ProcessingVMatrix.h>
#include <plearn/vmat/FilteredVMatrix.h>
#include <plearn/base/tostring.h>

namespace PLearn {
using namespace std;

VPLCombinedLearner::VPLCombinedLearner()
    :orig_inputsize(-1),
     orig_targetsize(-1)
{
}

PLEARN_IMPLEMENT_OBJECT(
    VPLCombinedLearner,
    "Learner that trains several sub-learners and whose output is a VPL-expressed function of the outputs of the sub-learners.",
    "See VMatLanguage for the definition of the allowed VPL syntax.\n"
    "To allow sub-learners to get their own particular view of the training set,\n"
    "it is often convenient to use VPLPreprocessedLearners for the sub-learners.\n"
    );
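// Usage sketch (hypothetical, for illustration only: the learner objects and
// the exact VPL program below are made up; check VMatLanguage for the
// authoritative operator syntax). It combines two sub-learners and averages
// the first output of each:
//
//     PP<VPLCombinedLearner> combined = new VPLCombinedLearner();
//     combined->sublearners_.append(regressor_a);   // any two PLearners
//     combined->sublearners_.append(regressor_b);
//     // RPN-style VPL: push both outputs, add, divide by 2;
//     // ':avg_out' names the generated field.
//     combined->output_prg = "@learner0.out0 @learner1.out0 + 2 / :avg_out";
//     combined->build();
//     combined->setTrainingSet(trainset);           // trainset: some VMat
//     combined->train();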
void VPLCombinedLearner::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    declareOption(ol, "sublearners", &VPLCombinedLearner::sublearners_,
                  OptionBase::buildoption,
                  "The list of sub-learners that will receive the training set.");

    declareOption(ol, "output_prg", &VPLCombinedLearner::output_prg, OptionBase::buildoption,
                  "Program string in VPL language to compute this learner's outputs\n"
                  "from a concatenation of the raw input fields and the sub-learners' outputs,\n"
                  "renamed as learner0.outputname, learner1.outputname, etc.\n"
                  "Note that outputs are often named out0, out1, out2, ...\n"
                  "New output names must be given to the generated values with the :fieldname VPL syntax.\n"
                  "If it is an empty string, the sub-learners' outputs are output as is.\n");

    declareOption(ol, "costs_prg", &VPLCombinedLearner::costs_prg, OptionBase::buildoption,
                  "Program string in VPL language to obtain postprocessed test costs\n"
                  "from a concatenation of the raw input fields and target fields,\n"
                  "and the sub-learners' outputs and test costs.\n"
                  "Names must be given to the generated values with the :fieldname VPL syntax.\n"
                  "If it is an empty string, the underlying learners' test costs are output as is.\n"
                  "Note that this processing is only applied to test costs; train costs are returned as is.");

    declareOption(ol, "orig_fieldnames", &VPLCombinedLearner::orig_fieldnames, OptionBase::learntoption,
                  "Original fieldnames of the training set");
    declareOption(ol, "orig_inputsize", &VPLCombinedLearner::orig_inputsize, OptionBase::learntoption,
                  "Original inputsize of the training set");
    declareOption(ol, "orig_targetsize", &VPLCombinedLearner::orig_targetsize, OptionBase::learntoption,
                  "Original targetsize of the training set");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void VPLCombinedLearner::build_()
{
    if(train_set.isNull() && (orig_inputsize>0 || orig_targetsize>0) ) // we're probably reloading a saved VPLCombinedLearner
    {
        initializeOutputPrograms();
    }
    else
        initializeCostNames();
}

// ### Nothing to add here, simply calls build_
void VPLCombinedLearner::build()
{
    inherited::build();
    build_();
}
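// Illustrative costs_prg (hypothetical: assumes each sub-learner reports a
// test cost named "mse"; the learner0., learner1., ... renaming scheme comes
// from initializeOutputPrograms() below):
//
//     "@learner0.mse @learner1.mse + 2 / :avg_mse"
//
// would report, as this learner's single test cost, the mean of the two
// sub-learners' mse test costs.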
void VPLCombinedLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.

    deepCopyField(sublearners_, copies);

    output_prg_.makeDeepCopyFromShallowCopy(copies);
    costs_prg_.makeDeepCopyFromShallowCopy(copies);

    deepCopyField(outputnames_, copies);
    deepCopyField(costnames_, copies);
    deepCopyField(invec_for_output_prg, copies);
    deepCopyField(invec_for_costs_prg, copies);

    deepCopyField(sublearners_outputsizes, copies);
    deepCopyField(sublearners_ntestcosts, copies);
    deepCopyField(orig_fieldnames, copies);
}

void VPLCombinedLearner::setValidationSet(VMat validset)
{
    inherited::setValidationSet(validset);
    for(int k=0; k<sublearners_.length(); k++)
        sublearners_[k]->setValidationSet(validset);
}

void VPLCombinedLearner::setTrainStatsCollector(PP<VecStatsCollector> statscol)
{
    inherited::setTrainStatsCollector(statscol);
    // Each sub-learner gets its own fresh stats collector, not statscol.
    for(int k=0; k<sublearners_.length(); k++)
        sublearners_[k]->setTrainStatsCollector(new VecStatsCollector());
}

int VPLCombinedLearner::outputsize() const
{
    return outputnames_.size();
}

void VPLCombinedLearner::setExperimentDirectory(const PPath& the_expdir)
{
    inherited::setExperimentDirectory(the_expdir);
    for(int k=0; k<sublearners_.length(); k++)
        sublearners_[k]->setExperimentDirectory(the_expdir/("SubLearner_"+tostring(k)));
}

void VPLCombinedLearner::forget()
{
    for(int k=0; k<sublearners_.length(); k++)
        sublearners_[k]->forget();
    stage = 0;
}

void VPLCombinedLearner::train()
{
    for(int k=0; k<sublearners_.length(); k++)
        sublearners_[k]->train();
    ++stage;
}
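// initializeOutputPrograms() below compiles output_prg and costs_prg against
// named source fields laid out as follows (deduced from the loop it contains):
//
//   infields_for_output_prg: [ orig input fields,
//                              learner0.<outputnames...>, learner1.<outputnames...>, ... ]
//   infields_for_costs_prg:  [ orig input fields, orig target fields,
//                              learner0.<outputnames...>, learner0.<testcostnames...>,
//                              learner1.<outputnames...>, learner1.<testcostnames...>, ... ]
//
// These layouts must match the input vectors assembled at compute time in
// computeOutput() and computeOutputAndCosts().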
void VPLCombinedLearner::initializeOutputPrograms()
{
    TVec<string> orig_input_fieldnames = orig_fieldnames.subVec(0,orig_inputsize);
    TVec<string> orig_target_fieldnames = orig_fieldnames.subVec(orig_inputsize, orig_targetsize);

    int nlearners = sublearners_.length();
    sublearners_outputsizes.resize(nlearners);
    sublearners_ntestcosts.resize(nlearners);

    TVec<string> infields_for_output_prg = orig_input_fieldnames.copy();
    TVec<string> infields_for_costs_prg = concat(orig_input_fieldnames,orig_target_fieldnames);

    outputnames_.resize(0);
    costnames_.resize(0);

    for(int k=0; k<nlearners; k++)
    {
        char tmp[100];
        sprintf(tmp,"learner%d.",k);
        string prefix(tmp);

        int nout = sublearners_[k]->outputsize();
        sublearners_outputsizes[k] = nout;
        TVec<string> outputnames = sublearners_[k]->getOutputNames();
        for(int p=0; p<nout; p++)
        {
            string outname = prefix+outputnames[p];
            if(output_prg.empty())
                outputnames_.append(outname);
            else
                infields_for_output_prg.append(outname);

            if(!costs_prg.empty())
                infields_for_costs_prg.append(outname);
        }

        int ntest = sublearners_[k]->nTestCosts();
        sublearners_ntestcosts[k] = ntest;
        TVec<string> testcostnames = sublearners_[k]->getTestCostNames();
        for(int p=0; p<ntest; p++)
        {
            string costname = prefix+testcostnames[p];
            if(costs_prg.empty())
                costnames_.append(costname);
            else
                infields_for_costs_prg.append(costname);
        }
    }

    if(!output_prg.empty())
    {
        output_prg_.setSourceFieldNames(infields_for_output_prg);
        output_prg_.compileString(output_prg, outputnames_);
    }

    if(!costs_prg.empty())
    {
        costs_prg_.setSourceFieldNames(infields_for_costs_prg);
        costs_prg_.compileString(costs_prg, costnames_);
    }
}

void VPLCombinedLearner::initializeCostNames()
{
    int nlearners = sublearners_.length();
    sublearners_ntestcosts.resize(nlearners);

    costnames_.resize(0);

    for(int k=0; k<nlearners; k++)
    {
        char tmp[100];
        sprintf(tmp,"learner%d.",k);
        string prefix(tmp);

        int ntest = sublearners_[k]->nTestCosts();
        sublearners_ntestcosts[k] = ntest;
        TVec<string> testcostnames = sublearners_[k]->getTestCostNames();
        for(int p=0; p<ntest; p++)
        {
            string costname = prefix+testcostnames[p];
            if(costs_prg.empty())
                costnames_.append(costname);
        }
    }

    if(!costs_prg.empty())
        VMatLanguage::getOutputFieldNamesFromString(costs_prg, costnames_);
}

void VPLCombinedLearner::setTrainingSet(VMat training_set, bool call_forget)
{
    bool training_set_has_changed = !train_set || !(train_set->looksTheSameAs(training_set));
    if (call_forget && !training_set_has_changed)
    {
        // In this case, the sub-learners' build() will not have been called,
        // which may cause trouble if they update data from the training set.
        // It is not entirely clear when this situation arises, but calling
        // build() on the sub-learners is safe, so we do it.
        for(int k=0; k<sublearners_.length(); k++)
            sublearners_[k]->build();
    }

    orig_fieldnames = training_set->fieldNames();
    orig_inputsize = training_set->inputsize();
    orig_targetsize = training_set->targetsize();

    for(int k=0; k<sublearners_.length(); k++)
        sublearners_[k]->setTrainingSet(training_set, call_forget);

    inherited::setTrainingSet(training_set, call_forget); // will call forget if needed

    initializeOutputPrograms();
}


void VPLCombinedLearner::computeOutput(const Vec& input, Vec& output) const
{
    output.resize(outputsize());
    int nlearners = sublearners_.length();

    int ninputs = inputsize();
    int outpos = 0;
    if(!output_prg.empty()) // output_prg is used: its input vector starts with the raw input fields
    {
        invec_for_output_prg.resize(output_prg_.inputsize());
        invec_for_output_prg.subVec(outpos, ninputs) << input; // copy input part into invec_for_output_prg
        outpos += ninputs;
    }

    Vec outvec;
    for(int k=0; k<nlearners; k++)
    {
        int nout = sublearners_outputsizes[k];
        if(output_prg.empty()) // no output_prg: output is simply the concatenation of the sub-learners' outputs
            outvec = output.subVec(outpos,nout);
        else
            outvec = invec_for_output_prg.subVec(outpos, nout);
        sublearners_[k]->computeOutput(input, outvec);
        outpos += nout;
    }

    if(!output_prg.empty())
        output_prg_.run(invec_for_output_prg, output);
}
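// computeOutputAndCosts() below assembles invec_for_costs_prg incrementally as
//   [ input, target, out0, costs0, out1, costs1, ... ]
// matching infields_for_costs_prg above. Inside the loop, costspos points at
// the slot for learner k's outputs, so its costs are written at costspos+nout,
// and costspos is advanced by nout+ncosts once both have been filled in.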
void VPLCombinedLearner::computeOutputAndCosts(const Vec& input, const Vec& target,
                                               Vec& output, Vec& costs) const
{
    output.resize(outputsize());
    costs.resize(nTestCosts());

    PLASSERT(input.length()==inputsize());
    PLASSERT(target.length()==targetsize());

    int nlearners = sublearners_.length();

    int ninputs = inputsize();
    int ntargets= targetsize();
    int outpos = 0;
    int costspos = 0;

    if(!output_prg.empty())
    {
        invec_for_output_prg.resize(output_prg_.inputsize());
        invec_for_output_prg.subVec(outpos,ninputs) << input;
        outpos += ninputs;
    }
    if(!costs_prg.empty())
    {
        invec_for_costs_prg.resize(costs_prg_.inputsize());
        invec_for_costs_prg.subVec(costspos,ninputs) << input;
        costspos += ninputs;
        invec_for_costs_prg.subVec(costspos,ntargets) << target;
        costspos += ntargets;
    }

    Vec outvec;
    Vec costvec;
    for(int k=0; k<nlearners; k++)
    {
        int nout = sublearners_outputsizes[k];
        int ncosts = sublearners_ntestcosts[k];

        if(output_prg.empty())
            outvec = output.subVec(outpos, nout);
        else
            outvec = invec_for_output_prg.subVec(outpos, nout);

        if(costs_prg.empty())
            costvec = costs.subVec(costspos, ncosts);
        else
            costvec = invec_for_costs_prg.subVec(costspos+nout, ncosts);

        sublearners_[k]->computeOutputAndCosts(input, target, outvec, costvec);
        if(!costs_prg.empty()) // copy outvec into the correct position in invec_for_costs_prg
        {
            invec_for_costs_prg.subVec(costspos, nout) << outvec;
            costspos += nout+ncosts;
        }
        else // bug fix: still advance past this learner's costs in the plain concatenation
            costspos += ncosts;
        outpos += nout;
    }

    if(!output_prg.empty())
        output_prg_.run(invec_for_output_prg, output);

    if(!costs_prg.empty())
        costs_prg_.run(invec_for_costs_prg, costs);
}


void VPLCombinedLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                 const Vec& target, Vec& costs) const
{
    Vec nonconst_output = output; // to make the constipated compiler happy
    computeOutputAndCosts(input, target, nonconst_output, costs);
}

TVec<string> VPLCombinedLearner::getOutputNames() const
{
    return outputnames_;
}

TVec<string> VPLCombinedLearner::getTestCostNames() const
{
    return costnames_;
}

TVec<string> VPLCombinedLearner::getTrainCostNames() const
{
    return TVec<string>();
}

void VPLCombinedLearner::resetInternalState()
{
    for(int k=0; k<sublearners_.length(); k++)
        sublearners_[k]->resetInternalState();
}

bool VPLCombinedLearner::isStatefulLearner() const
{
    return sublearners_[0]->isStatefulLearner();
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :