PLearn 0.1
LocalMedBoost.cc
// -*- C++ -*-

// LocalMedBoost.cc
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* ********************************************************************************
 * $Id: LocalMedBoost.cc, v 1.0 2004/07/19 10:00:00 Bengio/Kegl/Godbout              *
 * This file is part of the PLearn library.                                     *
 ******************************************************************************** */

#include "LocalMedBoost.h"
#include "RegressionTree.h"
#include "RegressionTreeRegisters.h"
#include "BaseRegressorWrapper.h"

namespace PLearn {
using namespace std;
PLEARN_IMPLEMENT_OBJECT(LocalMedBoost,
                        "Confidence-rated Regression by Localised Median Boosting",
                        "Robust regression by boosting the median.\n"
                        "It implements the algorithm described in the paper \"Robust Regression by Boosting the Median\" by Kegl.\n"
                        "It requires a base regressor that can separate a confidence function from the real output.\n"
                        "The base regressor must minimize a loss function of the form described in the paper.\n"
                        "It needs a loss_function_weight parameter used by the base regressor to compute its loss function.\n"
                        "Currently, a RegressionTree algorithm is implemented to serve as a base regressor.\n"
    );
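
// A minimal usage sketch (not part of the original file; the option values,
// the names train_vmat/input_vec/output_vec, and the train-set layout are
// assumptions for illustration). It only chains calls that appear in this
// class and in the standard PLearn learner workflow:
//
//     PP<LocalMedBoost> booster = new LocalMedBoost();
//     booster->setOption("nstages", "20");          // number of boosting stages
//     booster->setOption("robustness", "0.1");
//     booster->setOption("regression_tree", "1");   // boost RegressionTree base regressors
//     booster->build();
//     booster->setTrainingSet(train_vmat);          // 1 target and 1 weight column expected
//     booster->setTrainStatsCollector(new VecStatsCollector());
//     booster->train();
//     booster->computeOutput(input_vec, output_vec); // output_vec must have size 4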

LocalMedBoost::LocalMedBoost()
    : robustness(0.1),
      adapt_robustness_factor(0.0),
      loss_function_weight(1.0),
      objective_function("l2"),
      regression_tree(1),
      max_nstages(1)
{
}

LocalMedBoost::~LocalMedBoost()
{
}

void LocalMedBoost::declareOptions(OptionList& ol)
{
    declareOption(ol, "robustness", &LocalMedBoost::robustness, OptionBase::buildoption,
                  "The robustness parameter of the boosting algorithm.\n");
    declareOption(ol, "adapt_robustness_factor", &LocalMedBoost::adapt_robustness_factor, OptionBase::buildoption,
                  "If not 0.0, robustness will be adapted at each stage to max(t)min(i) base_award + this constant.\n");
    declareOption(ol, "loss_function_weight", &LocalMedBoost::loss_function_weight, OptionBase::buildoption,
                  "The hyper-parameter balancing the error and the confidence factor.\n");
    declareOption(ol, "objective_function", &LocalMedBoost::objective_function, OptionBase::buildoption,
                  "Indicates which base reward to use: the default is l2, and the other possibility is l1.\n"
                  "Normally it should be consistent with the objective function optimised by the base regressor.\n");
    declareOption(ol, "regression_tree", &LocalMedBoost::regression_tree, OptionBase::buildoption,
                  "If set to 1, the tree_regressor_template is used instead of the base_regressor_template.\n"
                  "This permits sorting the train set only once for all boosting iterations.\n");
    declareOption(ol, "max_nstages", &LocalMedBoost::max_nstages, OptionBase::buildoption,
                  "Maximum number of stages in the hyper-learner, used to size the vectors of base learners.\n"
                  "(If smaller than nstages, nstages is used.)");
    declareOption(ol, "base_regressor_template", &LocalMedBoost::base_regressor_template, OptionBase::buildoption,
                  "The template for the base regressor to be boosted (used if the regression_tree option is set to 0).\n");
    declareOption(ol, "tree_regressor_template", &LocalMedBoost::tree_regressor_template, OptionBase::buildoption,
                  "The template for a RegressionTree base regressor when the regression_tree option is set to 1.\n");
    declareOption(ol, "tree_wrapper_template", &LocalMedBoost::tree_wrapper_template, OptionBase::buildoption,
                  "The template for a RegressionTree base regressor to be boosted through a wrapper.\n"
                  "This is useful when you want to use a different confidence function.\n"
                  "The regression_tree option needs to be set to 2.\n");

    declareOption(ol, "end_stage", &LocalMedBoost::end_stage, OptionBase::learntoption,
                  "The last train stage after the end of training.\n");
    declareOption(ol, "bound", &LocalMedBoost::bound, OptionBase::learntoption,
                  "Cumulative bound computed after each boosting stage.\n");
    declareOption(ol, "maxt_base_award", &LocalMedBoost::maxt_base_award, OptionBase::learntoption,
                  "max(t)min(i) base_award kept to adapt robustness at each stage.\n");
    declareOption(ol, "sorted_train_set", &LocalMedBoost::sorted_train_set, OptionBase::learntoption,
                  "A sorted train set when using a tree as the base regressor.\n");
    declareOption(ol, "base_regressors", &LocalMedBoost::base_regressors, OptionBase::learntoption,
                  "The vector of base regressors built by the training at each boosting stage.\n");
    declareOption(ol, "function_weights", &LocalMedBoost::function_weights, OptionBase::learntoption,
                  "The array of function weights built by the boosting algorithm.\n");
    declareOption(ol, "loss_function", &LocalMedBoost::loss_function, OptionBase::learntoption,
                  "The array of loss_function values built by the boosting algorithm.\n");
    declareOption(ol, "sample_weights", &LocalMedBoost::sample_weights, OptionBase::learntoption,
                  "The array representing different distributions on the samples of the training set.\n");
    inherited::declareOptions(ol);
}

void LocalMedBoost::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(robustness, copies);
    deepCopyField(adapt_robustness_factor, copies);
    deepCopyField(loss_function_weight, copies);
    deepCopyField(objective_function, copies);
    deepCopyField(regression_tree, copies);
    deepCopyField(max_nstages, copies);
    deepCopyField(base_regressor_template, copies);
    deepCopyField(tree_regressor_template, copies);
    deepCopyField(tree_wrapper_template, copies);
    deepCopyField(end_stage, copies);
    deepCopyField(bound, copies);
    deepCopyField(maxt_base_award, copies);
    deepCopyField(sorted_train_set, copies);
    deepCopyField(base_regressors, copies);
    deepCopyField(function_weights, copies);
    deepCopyField(loss_function, copies);
    deepCopyField(sample_weights, copies);
}

void LocalMedBoost::build()
{
    inherited::build();
    build_();
}

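// What build_() expects of the train set (derived from the checks below):
// each row is laid out as [input (inputsize >= 1 columns) | target (1 column)
// | weight (1 column)], and the set must contain at least two samples. A
// hedged sketch of constructing such a VMat, assuming the usual PLearn
// memory-backed VMat and hypothetical sizes n_samples/n_inputs:
//
//     Mat data(n_samples, n_inputs + 2);        // inputs, target, weight
//     VMat train_vmat(data);
//     train_vmat->defineSizes(n_inputs, 1, 1);  // inputsize, targetsize, weightsize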
void LocalMedBoost::build_()
{
    if (train_set)
    {
        length = train_set->length();
        width = train_set->width();
        if (length < 2) PLERROR("LocalMedBoost: the training set must contain at least two samples, got %d", length);
        inputsize = train_set->inputsize();
        targetsize = train_set->targetsize();
        weightsize = train_set->weightsize();
        if (inputsize < 1) PLERROR("LocalMedBoost: expected inputsize greater than 0, got %d", inputsize);
        if (targetsize != 1) PLERROR("LocalMedBoost: expected targetsize to be 1, got %d", targetsize);
        if (weightsize != 1) PLERROR("LocalMedBoost: expected weightsize to be 1, got %d", weightsize);
        sample_input.resize(inputsize);
        sample_target.resize(targetsize);
        sample_output.resize(4);
        sample_costs.resize(6);
        sample_weights.resize(length);
        base_rewards.resize(length);
        base_confidences.resize(length);
        base_awards.resize(length);
        exp_weighted_edges.resize(length);
        if (max_nstages < nstages) max_nstages = nstages;
    }
}

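// Each boosting stage of train() below proceeds as: (1) deep-copy the
// relevant template and train it as the stage's base regressor on the current
// sample weights, (2) compute the base awards and the edge, (3) line-search
// the function weight alpha minimizing the exponential stage loss, (4) stop
// if alpha <= 0, otherwise re-normalize the sample weights and iterate.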
void LocalMedBoost::train()
{
    if (!train_set) PLERROR("LocalMedBoost: the learner has not been properly built");
    if (stage == 0)
    {
        base_regressors.resize(max_nstages);
        tree_regressors.resize(max_nstages);
        tree_wrappers.resize(max_nstages);
        function_weights.resize(max_nstages);
        loss_function.resize(max_nstages);
        initializeSampleWeight();
        initializeLineSearch();
        bound = 1.0;
        if (regression_tree > 0)
            sorted_train_set = new RegressionTreeRegisters(train_set,
                                                           report_progress,
                                                           verbosity);
    }
    PP<ProgressBar> pb;
    if (report_progress) pb = new ProgressBar("LocalMedBoost: train stages: ", nstages);
    for (; stage < nstages; stage++)
    {
        verbose("LocalMedBoost: The base regressor is being trained at stage: " + tostring(stage), 4);
        if (regression_tree > 0)
        {
            if (regression_tree == 1)
            {
                tree_regressors[stage] = ::PLearn::deepCopy(tree_regressor_template);
                tree_regressors[stage]->setTrainingSet(VMat(sorted_train_set));
                base_regressors[stage] = tree_regressors[stage];
            }
            else
            {
                tree_wrappers[stage] = ::PLearn::deepCopy(tree_wrapper_template);
                tree_wrappers[stage]->setSortedTrainSet(sorted_train_set);
                base_regressors[stage] = tree_wrappers[stage];
            }
        }
        else
        {
            base_regressors[stage] = ::PLearn::deepCopy(base_regressor_template);
        }
        base_regressors[stage]->setOption("loss_function_weight", tostring(loss_function_weight));
        base_regressors[stage]->setTrainingSet(train_set, true);
        base_regressors[stage]->setTrainStatsCollector(new VecStatsCollector);
        base_regressors[stage]->train();
        end_stage = stage + 1;
        computeBaseAwards();
        if (capacity_too_large)
        {
            verbose("LocalMedBoost: capacity too large, no base award is smaller than robustness: " + tostring(robustness), 2);
        }
        if (capacity_too_small)
        {
            verbose("LocalMedBoost: capacity too small, edge: " + tostring(edge), 2);
        }
        function_weights[stage] = findArgminFunctionWeight();
        computeLossBound();
        verbose("LocalMedBoost: stage: " + tostring(stage) + " alpha: " + tostring(function_weights[stage]) + " robustness: " + tostring(robustness), 3);
        if (function_weights[stage] <= 0.0) break;
        recomputeSampleWeight();
        if (report_progress) pb->update(stage);
    }
    if (report_progress)
    {
        pb = new ProgressBar("LocalMedBoost: computing the statistics: ", train_set->length());
    }
    train_stats->forget();
    min_margin = 1E15;
    for (each_train_sample_index = 0; each_train_sample_index < train_set->length(); each_train_sample_index++)
    {
        train_set->getExample(each_train_sample_index, sample_input, sample_target, sample_weight);
        computeOutput(sample_input, sample_output);
        computeCostsFromOutputs(sample_input, sample_output, sample_target, sample_costs);
        train_stats->update(sample_costs);
        if (sample_costs[5] < min_margin) min_margin = sample_costs[5];
        if (report_progress) pb->update(each_train_sample_index);
    }
    train_stats->finalize();
    verbose("LocalMedBoost: we are done, thank you!", 3);
}

void LocalMedBoost::computeBaseAwards()
{
    edge = 0.0;
    capacity_too_large = true;
    capacity_too_small = true;
    real mini_base_award = INT_MAX;
    int sample_costs_index;
    if (objective_function == "l1") sample_costs_index=3;
    else sample_costs_index=2;

    for (each_train_sample_index = 0; each_train_sample_index < length; each_train_sample_index++)
    {
        train_set->getExample(each_train_sample_index, sample_input, sample_target, sample_weight);
        base_regressors[stage]->computeOutputAndCosts(sample_input, sample_target, sample_output, sample_costs);
        base_rewards[each_train_sample_index] = sample_costs[sample_costs_index];

        base_confidences[each_train_sample_index] = sample_costs[1];
        base_awards[each_train_sample_index] = base_rewards[each_train_sample_index] * base_confidences[each_train_sample_index];
        if (base_awards[each_train_sample_index] < mini_base_award) mini_base_award = base_awards[each_train_sample_index];
        edge += sample_weight * base_awards[each_train_sample_index];
        if (base_awards[each_train_sample_index] < robustness) capacity_too_large = false;
    }
    if (stage == 0) maxt_base_award = mini_base_award;
    if (mini_base_award > maxt_base_award) maxt_base_award = mini_base_award;
    if (adapt_robustness_factor > 0.0)
    {
        robustness = maxt_base_award + adapt_robustness_factor;
        capacity_too_large = false;
    }
    if (edge >= robustness)
    {
        capacity_too_small = false;
    }
}
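
// In the notation of the code above, each sample i gets a base award
// gamma_i = reward_i * confidence_i, and the edge of the stage is the
// weighted sum over the current sample distribution:
//
//     edge = sum_i w_i * gamma_i
//
// capacity_too_large holds when every gamma_i >= robustness (unless
// robustness is being adapted), and capacity_too_small holds when
// edge < robustness.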

void LocalMedBoost::computeLossBound()
{
    loss_function[stage] = computeFunctionWeightFormula(function_weights[stage]);
    bound *= loss_function[stage];
}

void LocalMedBoost::initializeLineSearch()
{
    bracketing_learning_rate = 1.618034;
    bracketing_zero = 1.0e-10;
    interpolation_learning_rate = 0.381966;
    interpolation_precision = 1.0e-5;
    max_learning_rate = 100.0;
    bracket_a_start = 0.0;
    bracket_b_start = 1.0;
}
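
// The line-search constants above are the classic golden-section values:
// 1.618034 is the golden ratio used to grow the bracketing step, and
// 0.381966 = 2 - phi is the golden-section fraction used in the
// interpolation phase. findArgminFunctionWeight() below first brackets a
// minimum of computeFunctionWeightFormula (parabolic extrapolation, in the
// style of the standard "mnbrak" routine), then refines it with a Brent-style
// mix of golden-section steps and parabolic interpolation, capped at 100
// iterations.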

real LocalMedBoost::findArgminFunctionWeight()
{
    p_step = bracketing_learning_rate;
    p_lim = max_learning_rate;
    p_tin = bracketing_zero;
    x_a = bracket_a_start;
    x_b = bracket_b_start;
    f_a = computeFunctionWeightFormula(x_a);
    f_b = computeFunctionWeightFormula(x_b);
    x_lim = 0.0;
    if (f_b > f_a)
    {
        t_sav = x_a; x_a = x_b; x_b = t_sav;
        t_sav = f_a; f_a = f_b; f_b = t_sav;
    }
    x_c = x_b + p_step * (x_b - x_a);
    f_c = computeFunctionWeightFormula(x_c);
    while (f_b > f_c)
    {
        t_r = (x_b - x_a) * (f_b - f_c);
        t_q = (x_b - x_c) * (f_b - f_a);
        t_sav = t_q - t_r;
        if (t_sav < 0.0)
        {
            t_sav *= -1.0;
            if (t_sav < p_tin)
            {
                t_sav = p_tin;
            }
            t_sav *= -1.0;
        }
        else
        {
            if (t_sav < p_tin)
            {
                t_sav = p_tin;
            }
        }
        x_u = x_b - (((x_b - x_c) * t_q) - ((x_b - x_a) * t_r)) / (2 * t_sav); // parabolic extrapolation through (x_a,f_a), (x_b,f_b), (x_c,f_c)
        x_lim = x_b + p_lim * (x_c - x_b);
        if(((x_b -x_u) * (x_u - x_c)) > 0.0)
        {
            f_u = computeFunctionWeightFormula(x_u);
            if (f_u < f_c)
            {
                x_a = x_b;
                x_b = x_u;
                f_a = f_b;
                f_b = f_u;
                break;
            }
            else
            {
                if (f_u > f_b)
                {
                    x_c = x_u;
                    f_c = f_u;
                    break;
                }
            }
            x_u = x_c + p_step * (x_c - x_b);
            f_u = computeFunctionWeightFormula(x_u);
        }
        else
        {
            if (((x_c -x_u) * (x_u - x_lim)) > 0.0)
            {
                f_u = computeFunctionWeightFormula(x_u);
                if (f_u < f_c)
                {
                    x_b = x_c; x_c = x_u;
                    x_u = x_c + p_step * (x_c - x_b);
                    f_b = f_c; f_c = f_u;
                    f_u = computeFunctionWeightFormula(x_u);
                }
            }
            else
            {
                if (((x_u -x_lim) * (x_lim - x_c)) >= 0.0)
                {
                    x_u = x_lim;
                    f_u = computeFunctionWeightFormula(x_u);
                }
                else
                {
                    x_u = x_c + p_step * (x_c - x_b);
                    f_u = computeFunctionWeightFormula(x_u);
                }
            }
        }
        x_a = x_b; x_b = x_c; x_c = x_u;
        f_a = f_b; f_b = f_c; f_c = f_u;
    }
    p_step = interpolation_learning_rate;
    p_to1 = interpolation_precision;
    x_d = x_e = 0.0;
    x_v = x_w = x_x = x_b;
    f_v = f_w = f_x = f_b;
    if (x_a < x_c)
    {
        x_b = x_c;
    }
    else
    {
        x_b = x_a;
        x_a = x_c;
    }
    for (iter = 1; iter <= 100; iter++)
    {
        x_xmed = 0.5 * (x_a + x_b);
        p_tol1 = p_to1 * fabs(x_x) + p_tin;
        p_tol2 = 2.0 * p_tol1;
        if (fabs(x_x - x_xmed) <= (p_tol2 - 0.5 * (x_b - x_a)))
        {
            break;
        }
        if (fabs(x_e) > p_tol1)
        {
            t_r = (x_x - x_w) * (f_x - f_v);
            t_q = (x_x - x_v) * (f_x - f_w);
            t_p = (x_x - x_v) * t_q - (x_x - x_w) * t_r;
            t_q = 2.0 * (t_q - t_r);
            if (t_q > 0.0)
            {
                t_p = -t_p;
            }
            t_q = fabs(t_q);
            t_sav= x_e;
            x_e = x_d;
            if (fabs(t_p) >= fabs(0.5 * t_q * t_sav) ||
                t_p <= t_q * (x_a - x_x) ||
                t_p >= t_q * (x_b - x_x))
            {
                if (x_x >= x_xmed)
                {
                    x_d = p_step * (x_a - x_x);
                }
                else
                {
                    x_d = p_step * (x_b - x_x);
                }
            }
            else
            {
                x_d = t_p / t_q;
                x_u = x_x + x_d;
                if (x_u - x_a < p_tol2 || x_b - x_u < p_tol2)
                {
                    x_d = p_tol1;
                    if (x_xmed - x_x < 0.0)
                    {
                        x_d = -x_d;
                    }
                }
            }
        }
        else
        {
            if (x_x >= x_xmed)
            {
                x_d = p_step * (x_a - x_x);
            }
            else
            {
                x_d = p_step * (x_b - x_x);
            }
        }
        if (fabs(x_d) >= p_tol1)
        {
            x_u = x_x + x_d;
        }
        else
        {
            if (x_d < 0.0)
            {
                x_u = x_x - p_tol1;
            }
            else
            {
                x_u = x_x + p_tol1;
            }
        }
        f_u = computeFunctionWeightFormula(x_u);
        if (f_u <= f_x)
        {
            if (x_u >= x_x)
            {
                x_a = x_x;
            }
            else
            {
                x_b = x_x;
            }
            x_v = x_w; x_w = x_x; x_x = x_u;
            f_v = f_w; f_w = f_x; f_x = f_u;
        }
        else
        {
            if (x_u < x_x)
            {
                x_a = x_u;
            }
            else
            {
                x_b = x_u;
            }
            if (f_u <= f_w || x_w == x_x)
            {
                x_v = x_w; x_w = x_u;
                f_v = f_w; f_w = f_u;
            }
            else
            {
                if (f_u <= f_v || x_v == x_x || x_v == x_w)
                {
                    x_v = x_u;
                    f_v = f_u;
                }
            }
        }
    }
    return x_x;
}
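
// Reading aid for the routine above (the names follow Brent's method):
// [x_a, x_b] is the current bracket, x_x is the best point found so far,
// x_w the second best, x_v the previous value of x_w, x_d the step just
// taken, and x_e the step taken one iteration earlier (used to decide
// whether the parabolic fit is trustworthy). The returned x_x is the
// function weight alpha for the current stage.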

real LocalMedBoost::computeFunctionWeightFormula(real alpha)
{
    real return_value = 0.0;
    for (each_train_sample_index = 0; each_train_sample_index < length; each_train_sample_index++)
    {
        return_value += sample_weights[each_train_sample_index] *
            exp(-1.0 * alpha * base_awards[each_train_sample_index]);
    }
    return_value *= safeexp(robustness * alpha);
    return return_value;
}
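
// The stage loss minimized by the line search, with robustness rho, function
// weight alpha and base awards gamma_i, reads off the code above as
//
//     E(alpha) = exp(rho * alpha) * sum_i w_i * exp(-alpha * gamma_i)
//
// and its per-stage values are multiplied into the cumulative bound by
// computeLossBound().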

void LocalMedBoost::initializeSampleWeight()
{
    real init_weight = 1.0 / length;
    for (each_train_sample_index = 0; each_train_sample_index < length; each_train_sample_index++)
    {
        sample_weights[each_train_sample_index] = init_weight;
        train_set->put(each_train_sample_index, inputsize + targetsize, sample_weights[each_train_sample_index]);
    }
}

void LocalMedBoost::recomputeSampleWeight()
{
    sum_exp_weighted_edges = 0.0;
    for (each_train_sample_index = 0; each_train_sample_index < length; each_train_sample_index++)
    {
        exp_weighted_edges[each_train_sample_index] = sample_weights[each_train_sample_index] *
            safeexp(-1.0 * function_weights[stage] * base_awards[each_train_sample_index]);
        sum_exp_weighted_edges += exp_weighted_edges[each_train_sample_index];
    }
    for (each_train_sample_index = 0; each_train_sample_index < length; each_train_sample_index++)
    {
        sample_weights[each_train_sample_index] = exp_weighted_edges[each_train_sample_index] / sum_exp_weighted_edges;
        train_set->put(each_train_sample_index, inputsize + targetsize,
                       sample_weights[each_train_sample_index]);
    }
}
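
// The update above is the usual boosting re-weighting: with alpha_t the
// function weight of stage t and gamma_i the base award of sample i,
//
//     w_i  <-  w_i * exp(-alpha_t * gamma_i) / Z_t
//
// where Z_t (sum_exp_weighted_edges) normalizes the weights to sum to one.
// Each new weight is also written back into the weight column of the train
// set, so the next base regressor trains on the updated distribution.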

void LocalMedBoost::verbose(string the_msg, int the_level)
{
    if (verbosity >= the_level)
        cout << the_msg << endl;
}

void LocalMedBoost::forget()
{
    stage = 0;
}

int LocalMedBoost::outputsize() const
{
    return 4;
}

TVec<string> LocalMedBoost::getTrainCostNames() const
{
    TVec<string> return_msg(6);
    return_msg[0] = "mse";
    return_msg[1] = "base_confidence";
    return_msg[2] = "l1";
    return_msg[3] = "rob_minus";
    return_msg[4] = "rob_plus";
    return_msg[5] = "min_rob";
    return return_msg;
}

TVec<string> LocalMedBoost::getTestCostNames() const
{
    return getTrainCostNames();
}

void LocalMedBoost::computeOutput(const Vec& inputv, Vec& outputv) const
{
    if (end_stage < 1)
        PLERROR("LocalMedBoost: No function has been built");
    TVec<real>  base_regressor_outputs;         // vector of base regressor outputs for a sample
    TVec<real>  base_regressor_confidences;     // vector of base regressor confidences for a sample
    Vec         base_regressor_outputv;         // vector of a base regressor's computed prediction
    real        sum_alpha;                      // sum of the function weights
    real        sum_function_weights;           // sum of all regressor weighted confidences
    real        norm_sum_function_weights;      // sum_function_weights normalized by sum_alpha
    real        sum_fplus_weights;              // sum of the regressor weighted confidences for the f+ function
    real        sum_fminus_weights;             // sum of the regressor weighted confidences for the f- function
    real        zero_quantile;
    real        rob_quantile;
    real        output_rob_plus;
    real        output_rob_minus;
    real        output_rob_save;
    int         index_j;                        // index of the candidate base regressor output
    int         index_t;                        // index running through all trained base regressors
    base_regressor_outputs.resize(end_stage);
    base_regressor_confidences.resize(end_stage);
    base_regressor_outputv.resize(2);
    sum_function_weights = 0.0;
    sum_alpha = 0.0;
    outputv[0] = -1E9;
    outputv[1] = 0.0;
    output_rob_plus = 1E9;
    output_rob_minus = -1E9;
    for (index_t = 0; index_t < end_stage; index_t++)
    {
        base_regressors[index_t]->computeOutput(inputv, base_regressor_outputv);
        base_regressor_outputs[index_t] = base_regressor_outputv[0];
        base_regressor_confidences[index_t] = base_regressor_outputv[1];
        if (base_regressor_outputs[index_t] > outputv[0])
        {
            outputv[0] = base_regressor_outputs[index_t];
            outputv[1] = base_regressor_confidences[index_t];
        }
        sum_alpha += function_weights[index_t];
        sum_function_weights += function_weights[index_t] * base_regressor_confidences[index_t];
    }
    norm_sum_function_weights = sum_function_weights / sum_alpha;
    if (norm_sum_function_weights > 0.0) rob_quantile = 0.5 * (1.0 - (robustness / norm_sum_function_weights) * sum_function_weights);
    else rob_quantile = 0.0;
    zero_quantile = 0.5 * sum_function_weights;
    for (index_j = 0; index_j < end_stage; index_j++)
    {
        sum_fplus_weights = 0.0;
        sum_fminus_weights = 0.0;
        for (index_t = 0; index_t < end_stage; index_t++)
        {
            if (base_regressor_outputs[index_j] < base_regressor_outputs[index_t])
            {
                sum_fplus_weights += function_weights[index_t] * base_regressor_confidences[index_t];
            }
            if (base_regressor_outputs[index_j] > base_regressor_outputs[index_t])
            {
                sum_fminus_weights += function_weights[index_t] * base_regressor_confidences[index_t];
            }
        }
        if (norm_sum_function_weights > 0.0 && sum_fplus_weights < zero_quantile)
        {
            if (base_regressor_outputs[index_j] < outputv[0])
            {
                outputv[0] = base_regressor_outputs[index_j];
                outputv[1] = base_regressor_confidences[index_j];
            }
        }
        if (norm_sum_function_weights > 0.0 && sum_fplus_weights < rob_quantile)
        {
            if (base_regressor_outputs[index_j] < output_rob_plus)
            {
                output_rob_plus = base_regressor_outputs[index_j];
            }
        }
        if (norm_sum_function_weights > 0.0 && sum_fminus_weights < rob_quantile)
        {
            if (base_regressor_outputs[index_j] > output_rob_minus)
            {
                output_rob_minus = base_regressor_outputs[index_j];
            }
        }
    }
    if (output_rob_minus > output_rob_plus)
    {
        output_rob_save = output_rob_minus;
        output_rob_minus = output_rob_plus;
        output_rob_plus = output_rob_save;
    }
    outputv[2] = output_rob_minus;
    outputv[3] = output_rob_plus;
}
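
// The prediction outputv[0] computed above is a weighted median of the base
// regressor outputs, each weighted by its function weight alpha_t times its
// confidence: the smallest candidate output whose weight above it stays
// below half of the total weight (zero_quantile). outputv[2] and outputv[3]
// bracket it with the robustness quantiles, so [outputv[2], outputv[3]]
// acts as a confidence interval around the median prediction.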

void LocalMedBoost::computeOutputAndCosts(const Vec& inputv, const Vec& targetv, Vec& outputv, Vec& costsv) const
{
    computeOutput(inputv, outputv);
    computeCostsFromOutputs(inputv, outputv, targetv, costsv);
}

void LocalMedBoost::computeCostsFromOutputs(const Vec& inputv, const Vec& outputv,
                                            const Vec& targetv, Vec& costsv) const
{
    costsv[0] = square_f(outputv[0] - targetv[0]);
    costsv[1] = outputv[1];
    if (fabs(outputv[0] - targetv[0]) > loss_function_weight) costsv[2] = 1.0; // fabs, not abs: the difference is a real
    else costsv[2] = 0.0;
    costsv[3] = outputv[3] - outputv[0];
    costsv[4] = outputv[0] - outputv[2];
    if (costsv[3] < costsv[4]) costsv[5] = costsv[3];
    else costsv[5] = costsv[4];
}
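
// Cost vector layout, matching getTrainCostNames(): costsv[0] "mse" is the
// squared error, costsv[1] "base_confidence" echoes the confidence output,
// costsv[2] "l1" is a 0/1 loss flagging errors larger than
// loss_function_weight, costsv[3] and costsv[4] ("rob_minus"/"rob_plus") are
// the distances from the prediction to the two robustness interval bounds,
// and costsv[5] "min_rob" is the smaller of the two, i.e. the robustness
// margin tracked as min_margin during training.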

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :