// -*- C++ -*-

// BasisSelectionRegressor.cc
//
// Copyright (C) 2006 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Vincent

#include "BasisSelectionRegressor.h"
#include <plearn/math/RealFunctionOfInputFeature.h>
#include <plearn/math/ShiftAndRescaleFeatureRealFunction.h>
#include <plearn/math/RealFunctionFromKernel.h>
#include <plearn/math/ConstantRealFunction.h>
#include <plearn/math/RealFunctionProduct.h>
#include <plearn/math/RealValueIndicatorFunction.h>
#include <plearn/math/RealRangeIndicatorFunction.h>
#include <plearn/vmat/MemoryVMatrix.h>
#include <plearn/math/random.h>
#include <plearn/vmat/RealFunctionsProcessedVMatrix.h>

#include <boost/thread.hpp>


namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    BasisSelectionRegressor,
    "This learner incrementally selects a basis of functions used to produce processed features fed to the underlying learner",
    "Functions are chosen among a dictionary of functions specified through the options.\n"
    "At each stage, the next function to append to the basis is chosen to be the one most collinear with the residue.\n"
    "This learner can be used to perform simple feature selection.\n"
    "The underlying learner is typically a linear regressor (this linear case might get\n"
    "implemented directly in this learner in future versions to skip unnecessary recomputation).");

BasisSelectionRegressor::BasisSelectionRegressor()
    : consider_constant_function(false),
      consider_raw_inputs(true),
      consider_normalized_inputs(false),
      consider_input_range_indicators(false),
      fixed_min_range(false),
      indicator_desired_prob(0.05),
      indicator_min_prob(0.01),
      n_kernel_centers_to_pick(-1),
      consider_interaction_terms(false),
      max_interaction_terms(-1),
      consider_n_best_for_interaction(-1),
      interaction_max_order(-1),
      consider_sorted_encodings(false),
      max_n_vals_for_sorted_encodings(-1),
      normalize_features(false),
      precompute_features(true),
      n_threads(0),
      thread_subtrain_length(0),
      use_all_basis(false),
      residue_sum(0),
      residue_sum_sq(0)
{}

void BasisSelectionRegressor::declareOptions(OptionList& ol)
{
    //#####  Public Build Options  ############################################

    declareOption(ol, "consider_constant_function", &BasisSelectionRegressor::consider_constant_function,
                  OptionBase::buildoption,
                  "If true, the constant function is included in the dictionary");

    declareOption(ol, "explicit_functions", &BasisSelectionRegressor::explicit_functions,
                  OptionBase::buildoption,
                  "This (optional) list of explicitly given RealFunctions\n"
                  "will get included in the dictionary");

    declareOption(ol, "explicit_interaction_variables", &BasisSelectionRegressor::explicit_interaction_variables,
                  OptionBase::buildoption,
                  "This (optional) list of explicitly given variables (fieldnames)\n"
                  "will get included in the dictionary for interaction terms ONLY\n"
                  "(i.e. these interact with the other functions.)");

    declareOption(ol, "mandatory_functions", &BasisSelectionRegressor::mandatory_functions,
                  OptionBase::buildoption,
                  "This (optional) list of explicitly given RealFunctions\n"
                  "will be automatically selected.");

    declareOption(ol, "consider_raw_inputs", &BasisSelectionRegressor::consider_raw_inputs,
                  OptionBase::buildoption,
                  "If true, then functions which select one of the raw inputs\n"
                  "will be included in the dictionary.\n"
                  "Beware that missing values (NaN) will be left as such.");

    declareOption(ol, "consider_normalized_inputs", &BasisSelectionRegressor::consider_normalized_inputs,
                  OptionBase::buildoption,
                  "If true, then functions which select and normalize inputs\n"
                  "will be included in the dictionary.\n"
                  "Missing values will be replaced by 0 (i.e. the mean of the normalized input).\n"
                  "Inputs which have nearly zero variance will be ignored.\n");

    declareOption(ol, "consider_input_range_indicators", &BasisSelectionRegressor::consider_input_range_indicators,
                  OptionBase::buildoption,
                  "If true, then we'll include in the dictionary indicator functions\n"
                  "triggered by input ranges and input special values.\n"
                  "Special values will include all symbolic values\n"
                  "(detected by the existence of a corresponding string mapping)\n"
                  "as well as MISSING_VALUE (NaN) (if it's present in more than an\n"
                  "indicator_min_prob fraction of the training set).\n"
                  "Real ranges will be formed in accordance with the indicator_desired_prob\n"
                  "and indicator_min_prob options. The necessary statistics are obtained\n"
                  "from the counts in the StatsCollector of the train_set VMatrix.\n");

    declareOption(ol, "fixed_min_range", &BasisSelectionRegressor::fixed_min_range,
                  OptionBase::buildoption,
                  "If true, the min value of all range functions will be set to -FLT_MAX.\n"
                  "This corresponds to a 'thermometer' type of mapping.");

    declareOption(ol, "indicator_desired_prob", &BasisSelectionRegressor::indicator_desired_prob,
                  OptionBase::buildoption,
                  "The algorithm will try to build input ranges that have at least this probability of occurrence in the training set.");

    declareOption(ol, "indicator_min_prob", &BasisSelectionRegressor::indicator_min_prob,
                  OptionBase::buildoption,
                  "This will be used instead of indicator_desired_prob for missing values\n"
                  "and for ranges immediately followed by a symbolic value.");

    declareOption(ol, "kernels", &BasisSelectionRegressor::kernels,
                  OptionBase::buildoption,
                  "If given, then each of these kernels, centered on each of the kernel_centers,\n"
                  "will be included in the dictionary");

    declareOption(ol, "kernel_centers", &BasisSelectionRegressor::kernel_centers,
                  OptionBase::buildoption,
                  "If you specified a non-empty 'kernels' list, you can give a matrix of explicit\n"
                  "centers here. Alternatively you can specify n_kernel_centers_to_pick.\n");

    declareOption(ol, "n_kernel_centers_to_pick", &BasisSelectionRegressor::n_kernel_centers_to_pick,
                  OptionBase::buildoption,
                  "If >0 then kernel_centers will be generated by randomly picking\n"
                  "n_kernel_centers_to_pick data points from the training set\n"
                  "(don't forget to set the seed option)");

    declareOption(ol, "consider_interaction_terms", &BasisSelectionRegressor::consider_interaction_terms,
                  OptionBase::buildoption,
                  "If true, the dictionary will be enriched, at each stage, by the product of\n"
                  "each of the already chosen basis functions with each of the dictionary functions\n");

    declareOption(ol, "max_interaction_terms", &BasisSelectionRegressor::max_interaction_terms,
                  OptionBase::buildoption,
                  "Maximum number of interaction terms to consider.  -1 means no max.\n"
                  "If more terms are possible, some are chosen randomly at each stage.\n");

    declareOption(ol, "consider_n_best_for_interaction", &BasisSelectionRegressor::consider_n_best_for_interaction,
                  OptionBase::buildoption,
                  "Only the consider_n_best_for_interaction best-scoring single-variable functions are considered when building interaction terms.  -1 means no limit.\n");

    declareOption(ol, "interaction_max_order", &BasisSelectionRegressor::interaction_max_order,
                  OptionBase::buildoption,
                  "Maximum order of a feature in an interaction function.  -1 means no max.\n");

    declareOption(ol, "consider_sorted_encodings", &BasisSelectionRegressor::consider_sorted_encodings,
                  OptionBase::buildoption,
                  "If true, the dictionary will be enriched with encodings sorted in target order.\n"
                  "This will be done for all fields with fewer than max_n_vals_for_sorted_encodings different values.\n");

    declareOption(ol, "max_n_vals_for_sorted_encodings", &BasisSelectionRegressor::max_n_vals_for_sorted_encodings,
                  OptionBase::buildoption,
                  "Maximum number of different values for a field to be considered for a sorted encoding.\n");

    declareOption(ol, "normalize_features", &BasisSelectionRegressor::normalize_features,
                  OptionBase::buildoption,
                  "EXPERIMENTAL OPTION (under development)");

    declareOption(ol, "learner", &BasisSelectionRegressor::template_learner,
                  OptionBase::buildoption,
                  "The underlying template learner.");

    declareOption(ol, "precompute_features", &BasisSelectionRegressor::precompute_features,
                  OptionBase::buildoption,
                  "True if the features matrix should be kept in memory; false if each row should be recomputed every time it is needed.");

    declareOption(ol, "n_threads", &BasisSelectionRegressor::n_threads,
                  OptionBase::buildoption,
                  "The number of threads to use when computing residue scores.\n"
                  "NOTE: MOST OF PLEARN IS NOT THREAD-SAFE; THIS CODE ASSUMES THAT SOME PARTS ARE, BUT THESE MAY CHANGE.");

    declareOption(ol, "thread_subtrain_length", &BasisSelectionRegressor::thread_subtrain_length,
                  OptionBase::buildoption,
                  "Number of examples each thread preloads at a time when using multi-threading.");

    declareOption(ol, "use_all_basis", &BasisSelectionRegressor::use_all_basis,
                  OptionBase::buildoption,
                  "If true, we use the underlying learner on all basis functions generated by the BSR.\n"
                  "In this case, interaction terms are disabled and only one stage of training is necessary.");

    //#####  Public Learnt Options  ############################################

    declareOption(ol, "selected_functions", &BasisSelectionRegressor::selected_functions,
                  OptionBase::learntoption,
                  "The list of real functions selected by the incremental algorithm.");

    declareOption(ol, "alphas", &BasisSelectionRegressor::alphas,
                  OptionBase::learntoption,
                  "CURRENTLY UNUSED");

    declareOption(ol, "scores", &BasisSelectionRegressor::scores,
                  OptionBase::learntoption,
                  "Matrix of scores for each candidate function.\n"
                  "Used only when 'consider_n_best_for_interaction' > 0.");

    declareOption(ol, "candidate_functions", &BasisSelectionRegressor::candidate_functions,
                  OptionBase::learntoption,
                  "The list of current candidate functions.");


    declareOption(ol, "explicit_interaction_functions", &BasisSelectionRegressor::explicit_interaction_functions,
                  OptionBase::learntoption,
                  "This (optional) list of explicitly given RealFunctions\n"
                  "will get included in the dictionary for interaction terms ONLY\n"
                  "(i.e. these interact with the other functions.)");

    declareOption(ol, "true_learner", &BasisSelectionRegressor::learner,
                  OptionBase::learntoption,
                  "The underlying learner to be trained with the extracted features.");


    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void BasisSelectionRegressor::build_()
{
    if (use_all_basis)
    {
        PLASSERT_MSG(nstages == 1, "\"nstages\" must be 1 when \"use_all_basis\" is true");
        PLASSERT_MSG(!consider_interaction_terms, "\"consider_interaction_terms\" must be false when \"use_all_basis\" is true");
    }
}


void BasisSelectionRegressor::setExperimentDirectory(const PPath& the_expdir)
{
    inherited::setExperimentDirectory(the_expdir);
    template_learner->setExperimentDirectory(the_expdir / "SubLearner");
}


// ### Nothing to add here, simply calls build_
void BasisSelectionRegressor::build()
{
    inherited::build();
    build_();
}


void BasisSelectionRegressor::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(explicit_functions, copies);
    deepCopyField(explicit_interaction_functions, copies);
    deepCopyField(explicit_interaction_variables, copies);
    deepCopyField(mandatory_functions, copies);
    deepCopyField(kernels, copies);
    deepCopyField(kernel_centers, copies);
    deepCopyField(learner, copies);
    deepCopyField(template_learner, copies);
    deepCopyField(selected_functions, copies);
    deepCopyField(alphas, copies);
    deepCopyField(scores, copies);

    deepCopyField(simple_candidate_functions, copies);
    deepCopyField(candidate_functions, copies);
    deepCopyField(features, copies);
    deepCopyField(residue, copies);
    deepCopyField(targets, copies);
    deepCopyField(weights, copies);

    deepCopyField(input, copies);
    deepCopyField(targ, copies);
    deepCopyField(featurevec, copies);
}


int BasisSelectionRegressor::outputsize() const
{
    //return 1;
    return template_learner->outputsize();
}

void BasisSelectionRegressor::forget()
{
    selected_functions.resize(0);
    targets.resize(0);
    residue.resize(0);
    weights.resize(0);
    features.resize(0,0);
    if(n_kernel_centers_to_pick>=0)
        kernel_centers.resize(0,0);
    if(learner.isNotNull())
        learner->forget();

    candidate_functions.resize(0);

    stage = 0;
}

void BasisSelectionRegressor::appendCandidateFunctionsOfSingleField(int fieldnum, TVec<RealFunc>& functions) const
{
    VMField field_info = train_set->getFieldInfos(fieldnum);
    string fieldname = field_info.name;
    VMField::FieldType fieldtype = field_info.fieldtype;
    StatsCollector& stats_collector = train_set->getStats(fieldnum);

    real n           = stats_collector.n();
    real nmissing    = stats_collector.nmissing();
    real nnonmissing = stats_collector.nnonmissing();
    real min_count     = indicator_min_prob * n;
    real desired_count = indicator_desired_prob * n;

    // Raw inputs for non-discrete variables
    if (consider_raw_inputs  &&  (fieldtype != VMField::DiscrGeneral))
    {
        RealFunc f = new RealFunctionOfInputFeature(fieldnum);
        f->setInfo(fieldname);
        functions.append(f);
    }

    // Normalized inputs for non-discrete variables
    if (consider_normalized_inputs  &&  (fieldtype != VMField::DiscrGeneral))
    {
        if (nnonmissing > 0)
        {
            real mean   = stats_collector.mean();
            real stddev = stats_collector.stddev();
            if (stddev > 1e-9)
            {
                RealFunc f = new ShiftAndRescaleFeatureRealFunction(fieldnum, -mean, 1./stddev, 0.);
                string info = fieldname + "-" + tostring(mean) + "/" + tostring(stddev);
                f->setInfo(info);
                functions.append(f);
            }
        }

    }

    if (consider_input_range_indicators)
    {
        const map<real,string>& smap = train_set->getRealToStringMapping(fieldnum);
        map<real,string>::const_iterator smap_it  = smap.begin();
        map<real,string>::const_iterator smap_end = smap.end();

        map<real, StatsCollectorCounts>* counts = stats_collector.getApproximateCounts();
        map<real,StatsCollectorCounts>::const_iterator count_it = counts->begin();
        map<real,StatsCollectorCounts>::const_iterator count_end = counts->end();

        // Indicator function for mapped variables
        while (smap_it != smap_end)
        {
            RealFunc f = new RealValueIndicatorFunction(fieldnum, smap_it->first);
            string info = fieldname + "=" + smap_it->second;
            f->setInfo(info);
            functions.append(f);
            ++smap_it;
        }

        // Indicator function for discrete variables not using mapping
        if (fieldtype == VMField::DiscrGeneral  ||  fieldtype == VMField::DiscrMonotonic)
        {
            while (count_it != count_end)
            {
                real val = count_it->first;
                // Make sure the variable doesn't use a mapping for this particular value
                bool mapped_value = false;
                smap_it = smap.begin();
                while (smap_it != smap_end)
                {
                    if (smap_it->first == val)
                    {
                        mapped_value = true;
                        break;
                    }
                    ++smap_it;
                }

                if (!mapped_value)
                {
                    RealFunc f = new RealValueIndicatorFunction(fieldnum, val);
                    string info = fieldname + "=" + tostring(val);
                    f->setInfo(info);
                    functions.append(f);
                }
                ++count_it;
            }
        }

        // If enough missing values, add an indicator function for it
        if (nmissing >= min_count && nnonmissing >= min_count)
        {
            RealFunc f = new RealValueIndicatorFunction(fieldnum, MISSING_VALUE);
            string info = fieldname + "=MISSING";
            f->setInfo(info);
            functions.append(f);
        }

        // For fieldtype DiscrGeneral, it stops here:
        // a RealRangeIndicatorFunction makes no sense for DiscrGeneral.
        if (fieldtype == VMField::DiscrGeneral) return;

        real cum_count = 0;
        real low = -FLT_MAX;
        real val = FLT_MAX;
        count_it = counts->begin();
        while (count_it != count_end)
        {
            val = count_it->first;
            cum_count += count_it->second.nbelow;
            bool in_smap = (smap.find(val) != smap_end);
            if((cum_count>=desired_count || (in_smap && cum_count>=min_count)) && (n-cum_count>=desired_count || (in_smap && n-cum_count>=min_count)))
            {
                RealRange range(']',low,val,'[');
                if (fixed_min_range) range.low = -FLT_MAX;
                RealFunc f = new RealRangeIndicatorFunction(fieldnum, range);
                string info = fieldname + "__" + tostring(range);
                f->setInfo(info);
                functions.append(f);
                cum_count = 0;
                low = val;
            }

            cum_count += count_it->second.n;
            if (in_smap)
            {
                cum_count = 0;
                low = val;
            }
            else if (cum_count>=desired_count && n-cum_count>=desired_count)
            {
                RealRange range(']',low,val,']');
                if (fixed_min_range) range.low = -FLT_MAX;
                RealFunc f = new RealRangeIndicatorFunction(fieldnum, range);
                string info = fieldname + "__" + tostring(range);
                f->setInfo(info);
                functions.append(f);
                cum_count = 0;
                low = val;
            }
            ++count_it;
        }
        // last chunk
        if (cum_count > 0)
        {
            if (cum_count>=min_count && n-cum_count>=min_count)
            {
                RealRange range(']',low,val,']');
                if (fixed_min_range) range.low = -FLT_MAX;
                RealFunc f = new RealRangeIndicatorFunction(fieldnum, range);
                string info = fieldname + "__" + tostring(range);
                f->setInfo(info);
                functions.append(f);
            }
            else if (functions.length()>0) // possibly lump it together with the last range
            {
                RealRangeIndicatorFunction* f = (RealRangeIndicatorFunction*)(RealFunction*)functions.lastElement();
                RealRange& range = f->range;
                if(smap.find(range.high) != smap_end) // the last range's upper bound is a symbolic value
                {
                    range.high = val; // OK, change the last range to include val
                    string info = fieldname + "__" + tostring(range);
                    f->setInfo(info);
                }
            }
        }
    }
}

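// Worked example (added for illustration, not in the original source): with
// n = 1000 training rows, indicator_desired_prob = 0.05 and
// indicator_min_prob = 0.01, we get desired_count = 50 and min_count = 10.
// Scanning the sorted value counts of a field, a range indicator such as
// ]low,val] is emitted each time roughly 50 examples have accumulated (10
// suffice next to a symbolic value or for the missing-value indicator), so
// each emitted range covers about 5% of the training set.
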
void BasisSelectionRegressor::appendKernelFunctions(TVec<RealFunc>& functions) const
{
    if(kernel_centers.length()<=0 && n_kernel_centers_to_pick>=0)
    {
        int nc = n_kernel_centers_to_pick;
        kernel_centers.resize(nc, inputsize());
        real weight;
        int l = train_set->length();
        if(random_gen.isNull())
            random_gen = new PRandom();
        random_gen->manual_seed(seed_);
        for(int i=0; i<nc; i++)
        {
            Vec input = kernel_centers(i);
            int rowpos = min(int(l*random_gen->uniform_sample()),l-1);
            train_set->getExample(rowpos, input, targ, weight);
        }
    }

    for(int i=0; i<kernel_centers.length(); i++)
    {
        Vec center = kernel_centers(i);
        for(int k=0; k<kernels.length(); k++)
            functions.append(new RealFunctionFromKernel(kernels[k],center));
    }
}

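// Note (added for clarity, not in the original comments): with K kernels and
// C kernel centers, the loops above contribute K*C candidate functions, each
// evaluating one kernel centered at one (possibly randomly picked) center.
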
void BasisSelectionRegressor::appendConstantFunction(TVec<RealFunc>& functions) const
{
    functions.append(new ConstantRealFunction());
}

void BasisSelectionRegressor::buildSimpleCandidateFunctions()
{
    if(consider_constant_function)
        appendConstantFunction(simple_candidate_functions);

    if(explicit_functions.length()>0)
        simple_candidate_functions.append(explicit_functions);

    for(int fieldnum=0; fieldnum<inputsize(); fieldnum++)
        appendCandidateFunctionsOfSingleField(fieldnum, simple_candidate_functions);

    if(kernels.length()>0)
        appendKernelFunctions(simple_candidate_functions);
}

void BasisSelectionRegressor::buildAllCandidateFunctions()
{
    if(selected_functions.length()==0)
    {
        candidate_functions = mandatory_functions.copy();
        while(candidate_functions.length() > 0)
            appendFunctionToSelection(0);
    }

    if(simple_candidate_functions.length()==0)
        buildSimpleCandidateFunctions();

    candidate_functions = simple_candidate_functions.copy();
    TVec<RealFunc> interaction_candidate_functions;

    int candidate_start = consider_constant_function ? 1 : 0; // skip bias
    int ncandidates = candidate_functions.length();
    int nselected = selected_functions.length();
    if (nselected > 0  &&  consider_interaction_terms)
    {
        TVec<RealFunc> top_candidate_functions = simple_candidate_functions.copy();
        int start = candidate_start;
        if (consider_n_best_for_interaction > 0  &&  ncandidates > consider_n_best_for_interaction)
        {
            top_candidate_functions = buildTopCandidateFunctions();
            start = 0;
        }

        for (int k=0; k<nselected; k++)
        {
            for (int j=start; j<top_candidate_functions.length(); j++)
            {
                addInteractionFunction(selected_functions[k], top_candidate_functions[j], interaction_candidate_functions);
            }
        }
    }

    // Build explicit_interaction_functions from explicit_interaction_variables
    explicit_interaction_functions.resize(0);
    for(int k=0; k<explicit_interaction_variables.length(); ++k)
        appendCandidateFunctionsOfSingleField(train_set->getFieldIndex(explicit_interaction_variables[k]), explicit_interaction_functions);

    // Add interaction_candidate_functions from explicit_interaction_functions
    for(int k=0; k < explicit_interaction_functions.length(); ++k)
    {
        for(int j=candidate_start; j<ncandidates; ++j)
        {
            addInteractionFunction(explicit_interaction_functions[k], simple_candidate_functions[j], interaction_candidate_functions);
        }
    }

    // If there are too many interaction_candidate_functions, choose among them at random
    if(max_interaction_terms > 0  &&  interaction_candidate_functions.length() > max_interaction_terms)
    {
        shuffleElements(interaction_candidate_functions);
        interaction_candidate_functions.resize(max_interaction_terms);
    }
    candidate_functions.append(interaction_candidate_functions);

    // If use_all_basis, append all candidate_functions to selected_functions
    if (use_all_basis)
    {
        while (candidate_functions.length() > 0)
            appendFunctionToSelection(0);
    }
}

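// Illustration (added, not in the original comments): if the selected basis
// is {f1, f2} and the simple dictionary is {g1, g2, g3}, the interaction
// candidates considered at the next stage are the products fi*gj
// (f1*g1, f1*g2, ..., f2*g3), randomly subsampled down to
// max_interaction_terms when there are too many.
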
void BasisSelectionRegressor::addInteractionFunction(RealFunc& f1, RealFunc& f2, TVec<RealFunc>& all_functions)
{
    // Check that the combined feature order of f1 and f2 doesn't exceed "interaction_max_order".
    // Note that f2 should be a new function to be added (and thus an instance of RealFunctionOfInputFeature).
    if (interaction_max_order > 0)
    {
        int order = 0;
        computeOrder(f1, order);
        computeOrder(f2, order);
        if (order > interaction_max_order)
            return;
    }

    RealFunc f = new RealFunctionProduct(f1, f2);
    f->setInfo("(" + f1->getInfo() + "*" + f2->getInfo() + ")");
    all_functions.append(f);
}

void BasisSelectionRegressor::computeOrder(RealFunc& func, int& order)
{
    if (dynamic_cast<RealFunctionOfInputFeature*>((RealFunction*) func))
    {
        ++order;
    }
    else if (RealFunctionProduct* f = dynamic_cast<RealFunctionProduct*>((RealFunction*) func))
    {
        computeOrder(f->f1, order);
        computeOrder(f->f2, order);
    }
    else
        PLERROR("In BasisSelectionRegressor::computeOrder: bad function type.");
}

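// Example (added for clarity): computeOrder counts the leaf features of a
// product tree.  If f1 = (x1*x2) and f2 = x3, the candidate product f1*f2
// has order 3, so it is skipped whenever interaction_max_order is 1 or 2.
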
TVec<RealFunc> BasisSelectionRegressor::buildTopCandidateFunctions()
{
    // The scores matrix should match (in size) the candidate_functions matrix
    PLASSERT(scores.length() == candidate_functions.length());

    sortRows(scores, 1, false);
    TVec<RealFunc> top_best_functions;
    for (int i=0; i<consider_n_best_for_interaction; i++)
        top_best_functions.append(simple_candidate_functions[(int)scores(i,0)]);

    return top_best_functions;
}

/* Finds the index of the most correlated (or anti-correlated) feature among
the full candidate features, returning it through best_candidate_index along
with its score in best_score.
*/
void BasisSelectionRegressor::findBestCandidateFunction(int& best_candidate_index, real& best_score) const
{
    int n_candidates = candidate_functions.size();
    Vec E_x;
    Vec E_xx;
    Vec E_xy;
    real wsum = 0;
    real E_y = 0;
    real E_yy = 0;

    computeWeightedAveragesWithResidue(candidate_functions, wsum, E_x, E_xx, E_y, E_yy, E_xy);

    scores.resize(simple_candidate_functions.length(), 2);

    if(verbosity>=5)
        perr << "n_candidates = " << n_candidates << endl;

    if(verbosity>=10)
        perr << "E_xy = " << E_xy << endl;
    best_candidate_index = -1;
    best_score = 0;

    for(int j=0; j<n_candidates; j++)
    {
        real score = 0;
        if(normalize_features)
            score = fabs((E_xy[j]-E_y*E_x[j])/(1e-6+sqrt(E_xx[j]-square(E_x[j]))));
        else
            score = fabs(E_xy[j]);
        if(verbosity>=10)
            perr << score << ' ';
        if(score>best_score)
        {
            best_candidate_index = j;
            best_score = score;
        }
        // we keep the score only for the simple_candidate_functions
        if (j < simple_candidate_functions.length())
        {
            scores(j, 0) = j;
            scores(j, 1) = score;
        }
    }

    if(verbosity>=10)
        perr << endl;
}

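// For reference (restating the scoring code above): with weighted moments
// E[.] over the training set and y the current residue, the score of
// candidate feature x_j is
//   |E[x_j*y] - E[y]*E[x_j]| / (1e-6 + sqrt(E[x_j^2] - E[x_j]^2))   if normalize_features
//   |E[x_j*y]|                                                      otherwise
// i.e. an (approximately) normalized covariance with the residue, versus a
// plain weighted average of x_j*y.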

// Function object for a worker thread
struct BasisSelectionRegressor::thread_wawr
{
    int thread_id, n_threads;
    const TVec<RealFunc>& functions;
    real& wsum;
    Vec& E_x;
    Vec& E_xx;
    real& E_y;
    real& E_yy;
    Vec& E_xy;
    const Vec& Y;
    boost::mutex& ts_mx;
    const VMat& train_set;
    boost::mutex& pb_mx;
    PP<ProgressBar> pb;
    int thread_subtrain_length;

    thread_wawr(int thread_id_, int n_threads_,
                const TVec<RealFunc>& functions_,
                real& wsum_,
                Vec& E_x_, Vec& E_xx_,
                real& E_y_, real& E_yy_,
                Vec& E_xy_, const Vec& Y_, boost::mutex& ts_mx_,
                const VMat& train_set_,
                boost::mutex& pb_mx_,
                PP<ProgressBar> pb_,
                int thread_subtrain_length_)
        : thread_id(thread_id_),
          n_threads(n_threads_),
          functions(functions_),
          wsum(wsum_),
          E_x(E_x_),
          E_xx(E_xx_),
          E_y(E_y_),
          E_yy(E_yy_),
          E_xy(E_xy_),
          Y(Y_),
          ts_mx(ts_mx_),
          train_set(train_set_),
          pb_mx(pb_mx_),
          pb(pb_),
          thread_subtrain_length(thread_subtrain_length_)
    {}

    void operator()()
    {
        Vec input, targ;
        real w;
        Vec candidate_features;
        int n_candidates = functions.length();
        int train_len = train_set->length();

        E_x.resize(n_candidates);
        E_x.fill(0.);
        E_xx.resize(n_candidates);
        E_xx.fill(0.);
        E_y = 0.;
        E_yy = 0.;
        E_xy.resize(n_candidates);
        E_xy.fill(0.);
        wsum = 0.;

        // Used when thread_subtrain_length > 1
        Mat all_inputs;
        Vec all_w;
        int input_size = train_set->inputsize();
        if (thread_subtrain_length > 1)
        {
            // pre-allocate memory
            all_inputs.resize(thread_subtrain_length, input_size);
            all_w.resize(thread_subtrain_length);
        }

        for(int i=thread_id; i<train_len; i+= n_threads)
        {
            if (thread_subtrain_length > 1)
            {
                int j = (i-thread_id)/n_threads;
                int j_mod = j % thread_subtrain_length;
                if (j_mod == 0)  // we must refill the preloaded data buffer
                {
                    all_inputs.resize(0, input_size);
                    all_w.resize(0);

                    boost::mutex::scoped_lock lock(ts_mx);
                    int max_train = min(train_len, i + thread_subtrain_length*n_threads);
                    for (int ii=i; ii<max_train; ii+= n_threads)
                    {
                        train_set->getExample(ii, input, targ, w);
                        all_inputs.appendRow(input);
                        all_w.append(w);
                    }
                }
                input = all_inputs(j_mod);
                w = all_w[j_mod];
            }
            else
            {
                boost::mutex::scoped_lock lock(ts_mx);
                train_set->getExample(i, input, targ, w);
            }
            evaluate_functions(functions, input, candidate_features);
            wsum += w;
            real y = Y[i];
            real wy = w*y;
            E_y  += wy;
            E_yy += wy*y;
            for(int j=0; j<n_candidates; j++)
            {
                real x = candidate_features[j];
                real wx = w*x;
                E_x[j] += wx;
                E_xx[j] += wx*x;
                E_xy[j] += wx*y;
            }
            if(pb)
            {
                boost::mutex::scoped_lock lock(pb_mx);
                if(pb->currentpos < static_cast<unsigned int>(i))
                    pb->update(i);
            }
        }
    }
};


void BasisSelectionRegressor::computeWeightedAveragesWithResidue(const TVec<RealFunc>& functions,
                                                                 real& wsum,
                                                                 Vec& E_x, Vec& E_xx,
                                                                 real& E_y, real& E_yy,
                                                                 Vec& E_xy) const
{
    const Vec& Y = residue;
    int n_candidates = functions.length();
    E_x.resize(n_candidates);
    E_x.fill(0.);
    E_xx.resize(n_candidates);
    E_xx.fill(0.);
    E_y = 0.;
    E_yy = 0.;
    E_xy.resize(n_candidates);
    E_xy.fill(0.);
    wsum = 0;

    Vec candidate_features;
    real w;
    int l = train_set->length();

    PP<ProgressBar> pb;
    if(report_progress)
        pb = new ProgressBar("Computing residue scores for " + tostring(n_candidates) + " candidate functions", l);

    if(n_threads > 0)
    {
        Vec wsums(n_threads);
        TVec<Vec> E_xs(n_threads);
        TVec<Vec> E_xxs(n_threads);
        Vec E_ys(n_threads);
        Vec E_yys(n_threads);
        TVec<Vec> E_xys(n_threads);
        boost::mutex ts_mx, pb_mx;
        TVec<boost::thread*> threads(n_threads);
        TVec<thread_wawr*> tws(n_threads);

        for(int i=0; i < n_threads; ++i)
        {
            tws[i] = new thread_wawr(i, n_threads, functions,
                                     wsums[i],
                                     E_xs[i], E_xxs[i],
                                     E_ys[i], E_yys[i],
                                     E_xys[i], Y, ts_mx, train_set,
                                     pb_mx, pb, thread_subtrain_length);
            threads[i] = new boost::thread(*tws[i]);
        }
        for(int i=0; i < n_threads; ++i)
        {
            threads[i]->join();
            wsum += wsums[i];
            E_y += E_ys[i];
            E_yy += E_yys[i];
            for(int j=0; j < n_candidates; ++j)
            {
                E_x[j] += E_xs[i][j];
                E_xx[j] += E_xxs[i][j];
                E_xy[j] += E_xys[i][j];
            }
            delete threads[i];
            delete tws[i];
        }
    }
    else // single-thread version
    {
        for(int i=0; i<l; i++)
        {
            real y = Y[i];
            train_set->getExample(i, input, targ, w);
            evaluate_functions(functions, input, candidate_features);
            wsum += w;
            real wy = w*y;
            E_y  += wy;
            E_yy += wy*y;
            for(int j=0; j<n_candidates; j++)
            {
                real x = candidate_features[j];
                real wx = w*x;
                E_x[j] += wx;
                E_xx[j] += wx*x;
                E_xy[j] += wx*y;
            }
            if(pb)
                pb->update(i);
        }
    }

    // Finalize computation: turn weighted sums into weighted averages
    real inv_wsum = 1.0/wsum;
    E_x  *= inv_wsum;
    E_xx *= inv_wsum;
    E_y  *= inv_wsum;
    E_yy *= inv_wsum;
    E_xy *= inv_wsum;
}

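// A note on the threading scheme above (added for clarity): worker t of
// n_threads processes rows t, t+n_threads, t+2*n_threads, ... and accumulates
// its own partial sums (wsum, E_x, E_xx, E_y, E_yy, E_xy).  The main thread
// joins each worker and adds the partial sums together before the final
// division by wsum, so no locking is needed on the accumulators themselves;
// the mutexes only guard the shared train_set and the progress bar.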

/*
void BasisSelectionRegressor::computeWeightedCorrelationsWithY(const TVec<RealFunc>& functions, const Vec& Y,
                                                               real& wsum,
                                                               Vec& E_x, Vec& V_x,
                                                               real& E_y, real& V_y,
                                                               Vec& E_xy, Vec& V_xy,
                                                               Vec& covar, Vec& correl, real min_variance) const
{
    int n_candidates = functions.length();
    E_x.resize(n_candidates);
    E_x.fill(0.);
    V_x.resize(n_candidates);
    V_x.fill(0.);
    E_y = 0.;
    V_y = 0.;
    E_xy.resize(n_candidates);
    E_xy.fill(0.);
    V_xy.resize(n_candidates);
    V_xy.fill(0.);
    wsum = 0;

    Vec candidate_features;
    real w;
    int l = train_set->length();
    for(int i=0; i<l; i++)
    {
        real y = Y[i];
        train_set->getExample(i, input, targ, w);
        evaluate_functions(functions, input, candidate_features);
        wsum += w;
        E_y  += w*y;
        V_y  += w*y*y;
        for(int j=0; j<n_candidates; j++)
        {
            real x = candidate_features[j];
            E_x[j] += w*x;
            V_x[j] += w*x*x;
            real xy = x*y;
            E_xy[j] += w*xy;
            V_xy[j] += w*xy*xy;
        }
    }

    // Finalize computation
    real inv_wsum = 1.0/wsum;
    E_y *= inv_wsum;
    V_y  = V_y*inv_wsum - square(E_y);
    if(V_y<min_variance)
        V_y = min_variance;
    covar.resize(n_candidates);
    correl.resize(n_candidates);
    for(int j=0; j<n_candidates; j++)
    {
        real E_x_j = E_x[j]*inv_wsum;
        E_x[j] = E_x_j;
        real V_x_j = V_x[j]*inv_wsum - square(E_x_j);
        if(V_x_j<min_variance)
            V_x_j = min_variance;
        V_x[j] = V_x_j;
        real E_xy_j = E_xy[j]*inv_wsum;
        E_xy[j] = E_xy_j;
        real V_xy_j = V_xy[j]*inv_wsum - square(E_xy_j);
        V_xy[j] = V_xy_j;
        real covar_j = E_xy_j - E_x_j*E_y; // covariance = E[xy] - E[x]E[y]
        real correl_j = covar_j/sqrt(V_x_j*V_y);
        covar[j] = covar_j;
        correl[j] = correl_j;
    }
}


void weighted_XY_statistics(const Vec& X, const Vec& Y, const Vec& W,
                            real& wsum,
                            real& E_x, real& V_x,
                            real& E_y, real& V_y,
                            real& E_xy, real& V_xy,
                            real& covar, real& correl)
{
    E_x = 0;
    V_x = 0;
    E_y = 0;
    V_y = 0;
    E_xy = 0;
    V_xy = 0;
    wsum = 0;

    const real* pX = X.data();
    const real* pY = Y.data();
    const real* pW = W.data();

    int l = X.length();
    while(l--)
    {
        real x = *pX++;
        real y = *pY++;
        real w = *pW++;
        wsum += w;
        E_x  += w*x;
        V_x  += w*x*x;
        E_y  += w*y;
        V_y  += w*y*y;
        real xy = x*y;
        E_xy += w*xy;
        V_xy += w*xy*xy;
    }
    real inv_wsum = 1.0/wsum;
    E_x  *= inv_wsum;
    V_x  = V_x*inv_wsum - E_x*E_x;
    E_y  *= inv_wsum;
    V_y  = V_y*inv_wsum - E_y*E_y;
    E_xy = E_xy*inv_wsum;
    V_xy = V_xy*inv_wsum - E_xy*E_xy;

    covar = E_xy - E_x*E_y;
    correl = covar/sqrt(V_x*V_y);
}
*/

void BasisSelectionRegressor::appendFunctionToSelection(int candidate_index)
{
    RealFunc f = candidate_functions[candidate_index];
    if(precompute_features)
    {
        int l = train_set->length();
        int nf = selected_functions.length();
        features.resize(l,nf+1, max(1,static_cast<int>(0.25*l*nf)),true);  // enlarge width while preserving content
        real weight;
        for(int i=0; i<l; i++)
        {
            train_set->getExample(i,input,targ,weight);
            features(i,nf) = f->evaluate(input);
        }
    }
    selected_functions.append(f);

    if(!consider_interaction_terms)
        candidate_functions.remove(candidate_index);
    else
        buildAllCandidateFunctions();
}

void BasisSelectionRegressor::retrainLearner()
{
    int l  = train_set->length();
    int nf = selected_functions.length();
    bool weighted = train_set->hasWeights();

    // set a dummy training set, so that the underlying learner frees its reference to the previous training set
    /*
    VMat newtrainset = new MemoryVMatrix(1,nf+(weighted?2:1));
    newtrainset->defineSizes(nf,1,weighted?1:0);
    learner->setTrainingSet(newtrainset);
    learner->forget();
    */

    // Deep-copy the underlying learner
    CopiesMap copies;
    learner = template_learner->deepCopy(copies);
    PP<VecStatsCollector> statscol = template_learner->getTrainStatsCollector();
    learner->setTrainStatsCollector(statscol);
    PPath expdir = template_learner->getExperimentDirectory();
    learner->setExperimentDirectory(expdir);

    VMat newtrainset;
    if(precompute_features)
    {
        features.resize(l,nf+(weighted?2:1), max(1,int(0.25*l*nf)), true); // enlarge width while preserving content
        if(weighted)
        {
            for(int i=0; i<l; i++) // append target and weight columns to the features matrix
            {
                features(i,nf) = targets[i];
                features(i,nf+1) = weights[i];
            }
        }
        else // no weights
            features.lastColumn() << targets; // append target column to the features matrix

        newtrainset = new MemoryVMatrix(features);
    }
    else
        newtrainset = new RealFunctionsProcessedVMatrix(train_set, selected_functions, false, true, true);
    newtrainset->defineSizes(nf,1,weighted?1:0);
    learner->setTrainingSet(newtrainset);
    template_learner->setTrainingSet(newtrainset);
    learner->forget();
    learner->train();
    // resize the features matrix so it contains only the features
    if(precompute_features)
        features.resize(l,nf);
}

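// Layout note (added for clarity): when precompute_features is true, the
// training set handed to the sub-learner is a MemoryVMatrix over the features
// matrix, one row per example:
//   [ f_1(x) ... f_nf(x), target ]           without weights
//   [ f_1(x) ... f_nf(x), target, weight ]   with weights
// with sizes declared as (inputsize=nf, targetsize=1, weightsize=0 or 1).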

void BasisSelectionRegressor::train()
{
    if(nstages > 0)
    {
        if (!initTrain())
            return;
    } // workaround so that nstages can be zero...
    else if (!train_stats)
        train_stats = new VecStatsCollector();

    if(stage==0)
    {
        simple_candidate_functions.resize(0);
        buildAllCandidateFunctions();
    }

    while(stage<nstages)
    {
        if(targets.length()==0)
        {
            initTargetsResidueWeight();
            if(selected_functions.length()>0)
            {
                recomputeFeatures();
                if(stage==0) // only mandatory funcs.
                    retrainLearner();
                if (candidate_functions.length()>0)
                    recomputeResidue();
            }
        }

        if(candidate_functions.length()>0)
        {
            int best_candidate_index = -1;
            real best_score = 0;
            findBestCandidateFunction(best_candidate_index, best_score);
            if(verbosity>=2)
                perr << "\n\n*** Stage " << stage << " *****" << endl
                     << "Best candidate: index=" << best_candidate_index << endl
                     << "  score=" << best_score << endl;
            if(best_candidate_index>=0)
            {
                if(verbosity>=2)
                    perr << "  function info = " << candidate_functions[best_candidate_index]->getInfo() << endl;
                if(verbosity>=3)
                    perr << "  function= " << candidate_functions[best_candidate_index] << endl;
                appendFunctionToSelection(best_candidate_index);

                if(verbosity>=2)
                    perr << "residue_sum_sq before retrain: " << residue_sum_sq << endl;
                retrainLearner();
                recomputeResidue();
                if(verbosity>=2)
                    perr << "residue_sum_sq after retrain: " << residue_sum_sq << endl;
            }
        }
        else
        {
            if(verbosity>=2)
                perr << "\n\n*** Stage " << stage << " : no more candidate functions. *****" << endl;
        }
        ++stage;
    }
}

void BasisSelectionRegressor::initTargetsResidueWeight()
{
    int l = train_set.length();
    residue.resize(l);
    targets.resize(l);
    residue_sum = 0.;
    residue_sum_sq = 0.;
    weights.resize(l);

    real w;
    for(int i=0; i<l; i++)
    {
        train_set->getExample(i, input, targ, w);
        real t = targ[0];
        targets[i] = t;
        residue[i] = t;
        weights[i] = w;
        residue_sum += w*t;
        residue_sum_sq += w*square(t);
    }
}

void BasisSelectionRegressor::recomputeFeatures()
{
    if(!precompute_features)
        return;
    int l = train_set.length();
    int nf = selected_functions.length();
    features.resize(l,nf);
    real weight = 0;
    for(int i=0; i<l; i++)
    {
        train_set->getExample(i, input, targ, weight);
        Vec v = features(i);
        evaluate_functions(selected_functions, input, v);
    }
}

void BasisSelectionRegressor::recomputeResidue()
{
    int l = train_set.length();
    residue.resize(l);
    residue_sum = 0;
    residue_sum_sq = 0;
    Vec output(outputsize());
    // perr << "recomp_residue: { ";
    for(int i=0; i<l; i++)
    {
        real t = targets[i];
        real w = weights[i];
        if(precompute_features)
            computeOutputFromFeaturevec(features(i),output);
        else
        {
            real wt;
            train_set->getExample(i,input,targ,wt);
            computeOutput(input,output);
        }

        real resid = t-output[0];
        residue[i] = resid;
        // perr << "feature " << i << ": " << features(i) << " t:" << t << " out: " << output[0] << " resid: " << residue[i] << endl;
        residue_sum += w*resid;   // weighted, consistent with initTargetsResidueWeight
        residue_sum_sq += w*square(resid);
    }
    // perr << "}" << endl;
    // perr << "targets: \n" << targets << endl;
    // perr << "residue: \n" << residue << endl;
}

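// For reference (restating the code above): after each retrain the residue is
//   residue[i] = t_i - yhat(x_i)
// and the weighted sum of squared residues kept for diagnostics is
//   residue_sum_sq = sum_i w_i * (t_i - yhat(x_i))^2,
// the quantity printed before and after each retrain when verbosity >= 2.
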
void BasisSelectionRegressor::computeOutputFromFeaturevec(const Vec& featurevec, Vec& output) const
{
    int nout = outputsize();
    if(nout!=1 && !use_all_basis)
        PLERROR("outputsize should always be 1 for this learner (=%d)", nout);
    output.resize(nout);

    if(learner.isNull())
        output[0] = dot(alphas, featurevec);
    else
        learner->computeOutput(featurevec, output);
}

void BasisSelectionRegressor::computeOutput(const Vec& input, Vec& output) const
{
    evaluate_functions(selected_functions, input, featurevec);
    computeOutputFromFeaturevec(featurevec, output);
}

void BasisSelectionRegressor::printModelFunction(PStream& out) const
{
    for(int k=0; k<selected_functions.length(); k++)
    {
        out << "+ " << alphas[k] << "* " << selected_functions[k];
        out << endl;
    }
}

void BasisSelectionRegressor::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                      const Vec& targ, Vec& costs) const
{
    costs.resize(1);
    costs[0] = square(output[0]-targ[0]);
}

TVec<string> BasisSelectionRegressor::getTestCostNames() const
{
    return TVec<string>(1,string("mse"));
}

void BasisSelectionRegressor::setTrainStatsCollector(PP<VecStatsCollector> statscol)
{
    inherited::setTrainStatsCollector(statscol);
    template_learner->setTrainStatsCollector(statscol);
}


TVec<string> BasisSelectionRegressor::getTrainCostNames() const
{
    return template_learner->getTrainCostNames();
}

void BasisSelectionRegressor::setTrainingSet(VMat training_set, bool call_forget)
{
    inherited::setTrainingSet(training_set, call_forget);
    template_learner->setTrainingSet(training_set, call_forget);
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :