// -*- C++ -*-

// AddCostToLearner.cc
//
// Copyright (C) 2004 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: AddCostToLearner.cc 10134 2009-04-20 21:32:40Z nouiz $
 ******************************************************* */

// Authors: Olivier Delalleau
#include "AddCostToLearner.h"
#include <plearn/vmat/ConcatColumnsVMatrix.h>
#include <plearn/var/CrossEntropyVariable.h>
#include <plearn/vmat/SubVMatrix.h>
#include <plearn/var/SumOverBagsVariable.h>
#include <plearn/var/VarArray.h>
#include <plearn/var/VecElementVariable.h>
#include <plearn/sys/Profiler.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(AddCostToLearner,
                        "A PLearner that just adds additional costs to another PLearner.",
                        "In addition, this learner can be used to compute costs on bags instead of\n"
                        "individual samples, using the option 'compute_costs_on_bags' (this will\n"
                        "also automatically remove the bags column from the training set, so that\n"
                        "the learner can be trained as usual).\n"
                        "\n"
                        "Note that for now, the added costs are only added as test costs.\n"
                        "\n"
                        "Feel free to make this class evolve by adding new costs, or rewriting it\n"
                        "in a better fashion, because this one is certainly not perfect.\n"
                        "To use the lift cost, do the following:\n"
                        " (1) add a cost of type 'lift_output' to this object's option 'costs'\n"
                        " (2) replace the template_stats_collector of your PTester with one like this:\n"
                        "   template_stats_collector =\n"
                        "     LiftStatsCollector (\n"
                        "      lift_fraction = 0.1 ;\n"
                        "      output_column = \"lift_output\" ;\n"
                        "      opposite_lift = 0 ; # set to 1 if we want to optimize it\n"
                        "      sign_trick = 1 ;\n"
                        "     )\n"
                        " (3) ask for the lift in the stats:\n"
                        "   statnames = [\n"
                        "     \"E[test1.LIFT]\"\n"
                        "     \"E[test1.LIFT_MAX]\"\n"
                        "   ];" );
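
// Usage sketch (illustrative only; the sub-learner and option values below
// are hypothetical, but the option names match those declared in
// declareOptions() further down):
//
//   AddCostToLearner(
//     learner = NNet( ... );            # any sub-PLearner
//     costs = [ "class_error", "mse" ];
//     rescale_output = 0;
//   )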

// AddCostToLearner //
AddCostToLearner::AddCostToLearner()
    : bag_size(0),
      train_time(0),
      total_train_time(0),
      test_time(0),
      total_test_time(0),
      train_time_b(false),
      test_time_b(false),
      check_output_consistency(1),
      combine_bag_outputs_method(1),
      compute_costs_on_bags(0),
      force_output_to_target_interval(0),
      from_max(1),
      from_min(-1),
      rescale_output(0),
      rescale_target(0),
      to_max(1),
      to_min(0),
      n_classes(-1),
      confusion_matrix_target(0),
      find_class_threshold(false)
{}

// declareOptions //
void AddCostToLearner::declareOptions(OptionList& ol)
{
    declareOption(ol, "check_output_consistency", &AddCostToLearner::check_output_consistency, OptionBase::buildoption,
                  "If set to 1, additional checks will be performed to make sure the output\n"
                  "is compatible with the costs to be computed. This may slow down the costs\n"
                  "computation, but is also safer.");

    declareOption(ol, "combine_bag_outputs_method", &AddCostToLearner::combine_bag_outputs_method, OptionBase::buildoption,
                  "The method used to combine the individual outputs of the sub-learner to\n"
                  "obtain a global output on the bag (irrelevant if 'compute_costs_on_bags' == 0):\n"
                  " - 1 : o = 1 - (1 - o_1) * (1 - o_2) * .... * (1 - o_n)\n"
                  " - 2 : o = max(o_1, o_2, ..., o_n)");
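
    // Worked example: for a bag whose per-sample outputs are {0.2, 0.5},
    //   method 1 gives o = 1 - (1 - 0.2) * (1 - 0.5) = 0.6
    //   method 2 gives o = max(0.2, 0.5)             = 0.5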

    declareOption(ol, "compute_costs_on_bags", &AddCostToLearner::compute_costs_on_bags, OptionBase::buildoption,
                  "If set to 1, then the costs will be computed on bags, but the sub-learner will\n"
                  "be trained without the bag information (see SumOverBagsVariable for info on bags).");

    declareOption(ol, "costs", &AddCostToLearner::costs, OptionBase::buildoption,
        "The costs to be added:\n"
        " - 'class_error': classification error. If the sub-learner's output\n"
        "   has the same length as the target vector, then they are compared\n"
        "   component-wise. Otherwise, the target must be a one-dimensional\n"
        "   vector (an integer corresponding to the class), and the output\n"
        "   from the sub-learner is interpreted as a vector of weights for\n"
        "   each class.\n"
        " - 'binary_class_error': classification error for a one-dimensional\n"
        "   target that must be either 0 or 1. The output must also be one-\n"
        "   dimensional, and is interpreted as the predicted probability for\n"
        "   class 1 (thus class 1 is chosen when the output is > 0.5)\n"
        " - 'linear_class_error': as class_error, except that the cost is the\n"
        "   absolute difference between the class values\n"
        " - 'square_class_error': as class_error, except that the cost is the\n"
        "   squared difference between the class values\n"
        " - 'confusion_matrix': give the confusion matrix for the target\n"
        "   'confusion_matrix_target', where the row is the predicted class\n"
        "    and the column is the target class\n"
        " - 'lift_output': to compute the lift cost (for the positive class)\n"
        " - 'opposite_lift_output': to compute the lift cost (for the negative class)\n"
        " - 'cross_entropy': -t*log(o) - (1-t)*log(1-o)\n"
        " - 'NLL': -log(o[t])\n"
        " - 'mse': the mean squared error (o - t)^2\n"
        " - 'squared_norm_reconstruction_error': | ||i||^2 - ||o||^2 |\n"
        " - 'train_time': the time spent in the last call to the train() function\n"
        " - 'total_train_time': the total time spent in the train() function\n"
        " - 'test_time': the time spent in test() between the last two calls to train()\n"
        " - 'total_test_time': the sum of test_time\n"
        " - 'type1_err': SUM[type1_err] will return the number of type 1 errors (false positives).\n"
        "                E[type1_err] will return the false positive rate: # false positives / # positives\n"
        " - 'type2_err': same as type1_err, but for type 2 errors (false negatives)\n"
        " - 'sensitivity': E[sensitivity] returns nb true pos / nb total pos\n"
        " - 'specificity': E[specificity] returns nb true neg / nb total neg"
    );
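
    // Worked example for the formulas above: with a one-dimensional output
    // o = 0.8 and target t = 1,
    //   cross_entropy = -1*log(0.8) - 0*log(0.2) = -log(0.8) ~= 0.223
    // With a two-class output vector o = (0.2, 0.8) and class target t = 1,
    //   NLL = -log(o[1]) = -log(0.8) ~= 0.223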

    declareOption(ol, "force_output_to_target_interval", &AddCostToLearner::force_output_to_target_interval, OptionBase::buildoption,
                  "If set to 1 and 'rescale_output' is also set to 1, then the scaled output\n"
                  "will be forced to belong to [to_min, to_max], which may not be the case otherwise\n"
                  "if the output doesn't originate from [from_min, from_max].");

    declareOption(ol, "rescale_output", &AddCostToLearner::rescale_output, OptionBase::buildoption,
                  "If set to 1, then the output will be rescaled before computing the costs, according\n"
                  "to the values of from_min, from_max, to_min, to_max. This means it will map\n"
                  "[from_min, from_max] to [to_min, to_max].");

    declareOption(ol, "rescale_target", &AddCostToLearner::rescale_target, OptionBase::buildoption,
                  "Same as 'rescale_output', but for the target.");

    declareOption(ol, "from_max", &AddCostToLearner::from_max, OptionBase::buildoption,
                  "Upper bound of the source interval [from_min, from_max] (used in rescaling).");

    declareOption(ol, "from_min", &AddCostToLearner::from_min, OptionBase::buildoption,
                  "Lower bound of the source interval [from_min, from_max] (used in rescaling).");

    declareOption(ol, "to_max", &AddCostToLearner::to_max, OptionBase::buildoption,
                  "Upper bound of the destination interval [to_min, to_max] (used in rescaling).");

    declareOption(ol, "to_min", &AddCostToLearner::to_min, OptionBase::buildoption,
                  "Lower bound of the destination interval [to_min, to_max] (used in rescaling).");
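
    // Worked example of the rescaling: with the defaults from_min = -1,
    // from_max = 1, to_min = 0, to_max = 1, the affine map computed in
    // build_() and applied in computeCostsFromOutputs() is
    //   scaled = (o - from_min) * (to_max - to_min) / (from_max - from_min) + to_min
    // so fac = 1/2 and, e.g., o = 0.2 is mapped to (0.2 + 1) * 0.5 + 0 = 0.6.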

    declareOption(ol, "n_classes", &AddCostToLearner::n_classes, OptionBase::buildoption,
        "The number of classes. Only needed for the 'confusion_matrix' cost.");

    declareOption(ol, "confusion_matrix_target",
                  &AddCostToLearner::confusion_matrix_target,
                  OptionBase::buildoption,
        "Index of the target for which the confusion matrix is computed.");

    declareOption(ol, "find_class_threshold",
                  &AddCostToLearner::find_class_threshold,
                  OptionBase::buildoption,
        "If true, then during training we find the best threshold between\n"
        "classes.");

    declareOption(ol, "train_time",
                  &AddCostToLearner::train_time, OptionBase::learntoption,
                  "The time spent in the last call to train(), in seconds.");

    declareOption(ol, "total_train_time",
                  &AddCostToLearner::total_train_time, OptionBase::learntoption,
                  "The total time spent in the train() function, in seconds.");

    declareOption(ol, "test_time",
                  &AddCostToLearner::test_time, OptionBase::learntoption,
                  "The time spent in the last call to test(), in seconds.");

    declareOption(ol, "total_test_time",
                  &AddCostToLearner::total_test_time, OptionBase::learntoption,
                  "The total time spent in the test() function, in seconds.");

    declareOption(ol, "train_time_b",
                  &AddCostToLearner::train_time_b, OptionBase::learntoption,
                  "Whether we should measure the time spent in train().");

    declareOption(ol, "test_time_b",
                  &AddCostToLearner::test_time_b, OptionBase::learntoption,
                  "Whether we should measure the time spent in test().");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);
}

// build //
void AddCostToLearner::build()
{
    inherited::build();
    build_();
}

// build_ //
void AddCostToLearner::build_()
{
    // Give a default size to bag_outputs.
    bag_outputs.resize(10, 1);
    // Make sure all costs are valid.
    int n = costs.length();
    int min_verb = 2;
    bool display = (verbosity >= min_verb);
    int os = learner_->outputsize();
    if (os < 0) {
        // The sub-learner does not know its outputsize yet: we skip the build
        // for now; it will have to be done again later.
        if (display)
            cout << "In AddCostToLearner::build_ - The sub-learner does not know its outputsize yet, skipping" << endl;
        return;
    }
    sub_learner_output.resize(os);
    desired_target.resize(os);
    if (rescale_output || rescale_target) {
        real from_fac = from_max - from_min;
        real to_fac = to_max - to_min;
        fac = to_fac / from_fac;
    }
    output_min = -REAL_MAX;
    output_max = REAL_MAX;
    if (n > 0 && display) {
        cout << "Additional costs computed: ";
    }
    for (int i = 0; i < n; i++) {
        string c = costs[i];
        if (display) cout << c << " ";
        if (c == "lift_output") {
            // Output should be positive.
            output_min = max(output_min, real(0));
        } else if (c == "opposite_lift_output") {
            // 1 - output should be positive.
            output_max = min(output_max, real(1));
        } else if (c == "cross_entropy") {
            // Output should be in [0,1].
            output_min = max(output_min, real(0));
            output_max = min(output_max, real(1));
            {
                Var zero = var(0);
                output_var = accessElement(sub_learner_output, zero);
                target_var = accessElement(desired_target, zero);
                cross_entropy_var = cross_entropy(output_var, target_var);
                cross_entropy_prop = propagationPath(cross_entropy_var);
            }
        } else if (c == "mse") {
        } else if (c == "squared_norm_reconstruction_error") {
        } else if (c == "class_error") {
        } else if (c == "binary_class_error") {
        } else if (c == "train_time") {
            train_time_b = true;
        } else if (c == "total_train_time") {
            train_time_b = true;
        } else if (c == "test_time") {
            test_time_b = true;
        } else if (c == "total_test_time") {
            test_time_b = true;
        } else if (c == "linear_class_error") {
        } else if (c == "square_class_error") {
        } else if (c == "confusion_matrix") {
            if (n_classes <= 0)
                PLERROR("In AddCostToLearner::build_ - The number of classes must be positive (got n_classes = %d)", n_classes);
            output_min = 0;
            output_max = n_classes;
        } else if (c == "NLL") {
            // Output should be in [0,1].
            output_min = max(output_min, real(0));
            output_max = min(output_max, real(1));
        } else if (c == "type1_err") {
            output_min = 0;
            output_max = 1;
        } else if (c == "type2_err") {
            output_min = 0;
            output_max = 1;
        } else if (c == "sensitivity") {
            output_min = 0;
            output_max = 1;
        } else if (c == "specificity") {
            output_min = 0;
            output_max = 1;
        } else {
            PLERROR("In AddCostToLearner::build_ - Invalid cost requested %s (make sure you are using the new costs syntax)", c.c_str());
        }
    }
    if (n > 0 && display) {
        cout << endl;
    }

    if (test_time_b)
        Profiler::reset("AddCostToLearner::test");

    if (test_time_b || train_time_b)
        Profiler::activate();
}
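
// Example of how the loop above tightens the allowed output range: with
// costs = [ "lift_output", "cross_entropy" ] we end up with
//   output_min = max(-REAL_MAX, 0, 0) = 0,  output_max = min(REAL_MAX, 1) = 1,
// which is the range that check_output_consistency later enforces.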

// computeCostsFromOutputs //
void AddCostToLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                const Vec& target, Vec& costs,
                                                const bool add_sub_learner_costs) const
{
    int n_original_costs = learner_->nTestCosts();
    // We give only costs.subVec to the sub-learner because it may want to resize it.
    costs.resize(nTestCosts());
    Vec sub_costs = costs.subVec(0, n_original_costs);
    int target_length = target.length();
    if (add_sub_learner_costs) {
        if (compute_costs_on_bags) {
            learner_->computeCostsFromOutputs(input, output, target.subVec(0, target_length - 1), sub_costs);
        } else {
            learner_->computeCostsFromOutputs(input, output, target, sub_costs);
        }
    }

    if (compute_costs_on_bags) {
        // We only need to compute the costs when the whole bag has been seen,
        // otherwise we just store the outputs of each sample in the bag and fill
        // the costs with MISSING_VALUE.
        int bag_signal = int(target[target_length - 1]);
        if (bag_signal & SumOverBagsVariable::TARGET_COLUMN_FIRST) {
            // Beginning of the bag.
            bag_size = 0;
        }
        if (bag_outputs.width() != output.length()) {
            // Need to resize bag_outputs.
            bag_outputs.resize(bag_outputs.length(), output.length());
        }
        if (bag_outputs.length() <= bag_size) {
            // Need to resize bag_outputs.
            bag_outputs.resize(bag_outputs.length() * 2, bag_outputs.width());
        }
        bag_outputs(bag_size) << output;
        bag_size++;
        if (bag_signal & SumOverBagsVariable::TARGET_COLUMN_LAST) {
            // Reached the end of the bag: we can compute the output for the bag.
            bag_outputs.resize(bag_size, bag_outputs.width());
            combined_output.resize(output.length());
            switch (combine_bag_outputs_method) {
            case 1: // o = 1 - (1 - o_1) * (1 - o_2) * .... * (1 - o_n)
            {
                real prod;
                for (int j = 0; j < bag_outputs.width(); j++) {
                    prod = 1;
                    for (int i = 0; i < bag_outputs.length(); i++) {
                        prod = prod * (1 - bag_outputs(i, j));
                    }
                    combined_output[j] = 1 - prod;
                }
            }
            break;
            case 2: // o = max(o_1, o_2, ..., o_n)
            {
                for (int j = 0; j < bag_outputs.width(); j++) {
                    combined_output[j] = max(bag_outputs.column(j));
                }
            }
            break;
            default:
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - Unknown value for 'combine_bag_outputs_method'");
            }
            // We re-compute the sub-learner's costs with the brand new combined bag output.
            if (add_sub_learner_costs)
                learner_->computeCostsFromOutputs(input, combined_output, target.subVec(0, target_length - 1), sub_costs);
        } else {
            costs.fill(MISSING_VALUE);
            return;
        }
    } else {
        combined_output = output;
    }
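
    // Illustration of the bag protocol above, assuming the usual bit
    // convention from SumOverBagsVariable (TARGET_COLUMN_FIRST == 1,
    // TARGET_COLUMN_LAST == 2): the last target column carries a signal
    // where 1 = first sample of a bag, 2 = last, 3 = single-sample bag and
    // 0 = a middle sample. All samples except the one that closes the bag
    // get MISSING_VALUE costs.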

    Vec the_target;
    if (compute_costs_on_bags) {
        the_target = target.subVec(0, target_length - 1);
    } else {
        the_target = target;
    }

    // Optional rescaling.
    if (!rescale_output) {
        sub_learner_output << combined_output;
    } else {
        int n = output.length();
        real scaled_output;
        for (int i = 0; i < n; i++) {
            scaled_output = (combined_output[i] - from_min) * fac + to_min;
            if (force_output_to_target_interval) {
                if (scaled_output > to_max) {
                    scaled_output = to_max;
                } else if (scaled_output < to_min) {
                    scaled_output = to_min;
                }
            }
            sub_learner_output[i] = scaled_output;
        }
    }
    if (!rescale_target) {
        desired_target.resize(the_target.length());
        desired_target << the_target;
    } else {
        int n = output.length();
        if (n != target_length)
            PLERROR("In AddCostToLearner::computeCostsFromOutputs - When rescaling, "
                    "output and target are expected to have the same length");
        for (int i = 0; i < n; i++) {
            desired_target[i] = (the_target[i] - from_min) * fac + to_min;
        }
    }

    if (check_output_consistency) {
        real out;
        for (int i = 0; i < sub_learner_output.length(); i++) {
            out = sub_learner_output[i];
            if (out < output_min) {
                if (fast_is_equal(out, output_min))
                    sub_learner_output[i] = output_min;
                else
                    PLERROR("In AddCostToLearner::computeCostsFromOutputs - "
                            "Sub-learner output (%f) is lower than %f",
                            out, output_min);
            }
            if (out > output_max) {
                if (fast_is_equal(out, output_max))
                    sub_learner_output[i] = output_max;
                else
                    PLERROR("In AddCostToLearner::computeCostsFromOutputs - "
                            "Sub-learner output (%f) is higher than %f",
                            out, output_max);
            }
        }
    }
    int ind_cost = n_original_costs - 1;
    for (int i = 0; i < this->costs.length(); i++) {
        string c = this->costs[i];
        ind_cost++;
        if (c == "lift_output" || c == "opposite_lift_output") {
#ifdef BOUNDCHECK
            if (desired_target.length() != 1 ||
                (sub_learner_output.length() != 1 && sub_learner_output.length() != 2)) {
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - Lift cost is "
                        "only meant to be used with a one-dimensional target, and a "
                        "one-dimensional output or a two-dimensional output (which "
                        "would give the weights for classes 0 and 1 respectively)");
            }
#endif
            {
                // The 'lift cost', which actually isn't a cost, is the output when
                // the target is 1, and -output when the target is 0.
                // The 'opposite_lift cost' is 1-output when the target is 0, and
                // -(1-output) when the target is 1.
#ifdef BOUNDCHECK
                if (!fast_exact_is_equal(desired_target[0], 0) &&
                    !fast_exact_is_equal(desired_target[0], 1)) {
                    // Invalid target.
                    PLERROR("In AddCostToLearner::computeCostsFromOutputs - Target "
                            "%f isn't compatible with lift", desired_target[0]);
                }
#endif
                bool opposite_lift = (c == "opposite_lift_output");
                if (fast_exact_is_equal(desired_target[0], 1)) {
                    if (sub_learner_output.length() == 1)
                        if (opposite_lift)
                            costs[ind_cost] = sub_learner_output[0] - 1;
                        else
                            costs[ind_cost] = sub_learner_output[0];
                    else
                        if (opposite_lift)
                            costs[ind_cost] = - (sub_learner_output[0] - sub_learner_output[1] + 1) / 2.0;
                        else
                            costs[ind_cost] = (sub_learner_output[1] - sub_learner_output[0] + 1) / 2.0;
                } else {
                    if (sub_learner_output.length() == 1)
                        if (opposite_lift)
                            costs[ind_cost] = 1 - sub_learner_output[0];
                        else
                            costs[ind_cost] = - sub_learner_output[0];
                    else
                        if (opposite_lift)
                            costs[ind_cost] = (sub_learner_output[0] - sub_learner_output[1] + 1) / 2.0;
                        else
                            costs[ind_cost] = - (sub_learner_output[1] - sub_learner_output[0] + 1) / 2.0;
                }
            }
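            // Worked example: with a one-dimensional output o = 0.7,
            //   target 1  ->  lift_output =  0.7,  opposite_lift_output = -0.3
            //   target 0  ->  lift_output = -0.7,  opposite_lift_output =  0.3
            // With a two-dimensional output (w0, w1) = (0.2, 0.8) and target 1,
            //   lift_output = (0.8 - 0.2 + 1) / 2 = 0.8.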
        } else if (c == "cross_entropy") {
#ifdef BOUNDCHECK
            if (!fast_exact_is_equal(desired_target[0], 0) &&
                !fast_exact_is_equal(desired_target[0], 1)) {
                // Invalid target.
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - Target isn't compatible with cross_entropy");
            }
#endif
            cross_entropy_prop.fprop();
            costs[ind_cost] = cross_entropy_var->valuedata[0];
        } else if (c == "NLL") {
            PLASSERT_MSG(fast_exact_is_equal(desired_target[0],
                        round(desired_target[0])), "The target must be an "
                    "integer");
            int class_target = int(round(desired_target[0]));
            PLASSERT_MSG(class_target < sub_learner_output.length(),
                    "The sub-learner output must have a size equal to the "
                    "number of classes");
            costs[ind_cost] = - pl_log(sub_learner_output[class_target]);
        } else if (c == "class_error") {
            int output_length = sub_learner_output.length();
            bool good = true;
            if (output_length == target_length) {
                for (int k = 0; k < desired_target.length(); k++)
                    if (!is_equal(desired_target[k],
                                  sub_learner_output[k])) {
                        good = false;
                        break;
                    }
            } else if (target_length == 1) {
                // We assume the target is a number between 0 and c-1, and the output
                // is a vector of length c giving the weight for each class.
                good = is_equal(argmax(sub_learner_output), desired_target[0]);
            } else {
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - Wrong "
                        "output and/or target for the 'class_error' cost");
            }
            costs[ind_cost] = good ? 0 : 1;
        } else if (c == "binary_class_error") {
            PLASSERT( target_length == 1 );
            real t = desired_target[0];
            PLASSERT( fast_exact_is_equal(t, 0) || fast_exact_is_equal(t, 1));
            PLASSERT( sub_learner_output.length() == 1 );
            real predict = sub_learner_output[0] > 0.5 ? 1 : 0;
            costs[ind_cost] = is_equal(t, predict) ? 0 : 1;
        } else if (c == "linear_class_error") {
            int output_length = sub_learner_output.length();
            int diff = 0;
            if (output_length == target_length) {
                for (int k = 0; k < desired_target.length(); k++)
                    diff += abs(int(round(desired_target[k])) - int(round(sub_learner_output[k])));
            } else if (target_length == 1) {
                // We assume the target is a number between 0 and c-1, and the output
                // is a vector of length c giving the weight for each class.
                diff = abs(argmax(sub_learner_output) - int(round(desired_target[0])));
            } else {
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - Wrong "
                        "output and/or target for the 'linear_class_error' cost");
            }
            costs[ind_cost] = diff;
        } else if (c == "square_class_error") {
            int output_length = sub_learner_output.length();
            int diff = 0;
            if (output_length == target_length) {
                for (int k = 0; k < desired_target.length(); k++) {
                    int d = int(round(desired_target[k])) - int(round(sub_learner_output[k]));
                    diff += d*d;
                }
            } else if (target_length == 1) {
                // We assume the target is a number between 0 and c-1, and the output
                // is a vector of length c giving the weight for each class.
                diff = argmax(sub_learner_output) - int(round(desired_target[0]));
                diff *= diff;
            } else {
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - Wrong "
                        "output and/or target for the 'square_class_error' cost");
            }
            costs[ind_cost] = diff;
        } else if (c == "confusion_matrix") {

#ifdef BOUNDCHECK
            if (confusion_matrix_target >= target_length || confusion_matrix_target < -1)
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - confusion_matrix_target (%d) "
                        "not in the range of target_length (%d)", confusion_matrix_target, target_length);
#endif
            int sub_learner_out;
            real the_target;
            if (confusion_matrix_target == -1) {
                // Outputs are probabilities.
                sub_learner_out = argmax(sub_learner_output);
                the_target = desired_target[0];
            } else {
                sub_learner_out = int(round(sub_learner_output[confusion_matrix_target]));
                the_target = desired_target[confusion_matrix_target];
            }
            if (sub_learner_out < 0) {
                PLWARNING("In AddCostToLearner::computeCostsFromOutputs - bad value for sub_learner_out %d, we use 0 instead", sub_learner_out);
                sub_learner_out = 0;
            }
            if (sub_learner_out >= n_classes) {
                PLWARNING("In AddCostToLearner::computeCostsFromOutputs - bad value for sub_learner_out %d, we use %d instead", sub_learner_out, n_classes - 1);
                sub_learner_out = n_classes - 1;
            }
            PLCHECK(sub_learner_out < n_classes && sub_learner_out >= 0);
            // If outside the allowed range, we would access the wrong element
            // in the costs vector.
#ifdef BOUNDCHECK
            if (sub_learner_out >= n_classes
                || is_missing(sub_learner_out))
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - bad output value of sub_learner: sub_learner_out = %d, "
                        "missing or higher than or equal to n_classes (%d)",
                        sub_learner_out, n_classes);
            if (the_target >= n_classes
                || is_missing(the_target))
                PLERROR("In AddCostToLearner::computeCostsFromOutputs - bad value of the_target = %f, missing or higher than or equal to n_classes (%d)",
                        the_target, n_classes);
#endif
            for (int local_ind = ind_cost; local_ind < (n_classes*n_classes + ind_cost); local_ind++) {
                costs[local_ind] = 0;
            }
            int local_ind = ind_cost + sub_learner_out + int(round(the_target))*n_classes;

            costs[local_ind] = 1;
            ind_cost += n_classes*n_classes - 1; // minus one, as the loop adds one
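            // Worked example of the index layout: with n_classes = 2 the four
            // cells are stored row-major by target, matching the names from
            // getTestCostNames():
            //   [target0_pred0, target0_pred1, target1_pred0, target1_pred1]
            // so a prediction of class 1 with target 0 sets the cell at
            // ind_cost + 1 + 0*2.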
        } else if (c == "mse") {
            costs[ind_cost] = powdistance(desired_target, sub_learner_output);
        } else if (c == "squared_norm_reconstruction_error") {
            PLWARNING("In AddCostToLearner::computeCostsFromOutputs - 'squared_norm_reconstruction_error'"
                      " has not been tested yet, please remove this warning if it works correctly");
            costs[ind_cost] = abs(pownorm(input, 2) - pownorm(sub_learner_output, 2));
        } else if (c == "train_time") {
            costs[ind_cost] = train_time;
        } else if (c == "total_train_time") {
            costs[ind_cost] = total_train_time;
        } else if (c == "test_time") {
            costs[ind_cost] = test_time;
        } else if (c == "total_test_time") {
            costs[ind_cost] = total_test_time;
        } else if (c == "type1_err") {
            // Error on positive targets:
            // false negatives / (false negatives + true positives)
#ifdef BOUNDCHECK
            PLASSERT(sub_learner_output.length() == 1);
#endif
            real target = desired_target[0];
            real out = sub_learner_output[0];
            if (fast_is_equal(target, 1)) {
                if (fast_is_equal(out, 0))
                    costs[ind_cost] = 1;
                else
                    costs[ind_cost] = 0;
            } else
                costs[ind_cost] = MISSING_VALUE;
        } else if (c == "type2_err") {
            // Error on negative targets:
            // false positives / (false positives + true negatives)
#ifdef BOUNDCHECK
            PLASSERT(sub_learner_output.length() == 1);
#endif
            real target = desired_target[0];
            real out = sub_learner_output[0];
            if (fast_is_equal(target, 0)) {
                if (fast_is_equal(out, 1))
                    costs[ind_cost] = 1;
                else
                    costs[ind_cost] = 0;
            } else
                costs[ind_cost] = MISSING_VALUE;
        } else if (c == "sensitivity") {
            // nb true pos / (nb true pos + nb false neg),
            // equivalent to nb true pos / nb total pos.
            // Use X[test1.E[sensitivity]] to get the real value.
#ifdef BOUNDCHECK
            PLASSERT(sub_learner_output.length() == 1);
#endif
            real target = desired_target[0];
            real out = sub_learner_output[0];

            if (fast_is_equal(target, 1)) {
                if (fast_is_equal(out, 1))
                    costs[ind_cost] = 1;
                else
                    costs[ind_cost] = 0;
            } else
                costs[ind_cost] = MISSING_VALUE;
        } else if (c == "specificity") {
            // nb true neg / (nb true neg + nb false pos),
            // equivalent to nb true neg / nb total neg.
            // Use X[test1.E[specificity]] to get the real value.
#ifdef BOUNDCHECK
            PLASSERT(sub_learner_output.length() == 1);
#endif
            real target = desired_target[0];
            real out = sub_learner_output[0];

            if (fast_is_equal(target, 0)) {
                if (fast_is_equal(out, 0))
                    costs[ind_cost] = 1;
                else
                    costs[ind_cost] = 0;
            } else
                costs[ind_cost] = MISSING_VALUE;
        } else {
            PLERROR("In AddCostToLearner::computeCostsFromOutputs - Unknown cost");
        }
    }
}

// train //
void AddCostToLearner::train()
{
    Profiler::start("AddCostToLearner::train");

    int find_threshold = -1;
    if (find_class_threshold) {
        for (int i = 0; i < this->costs.length(); i++) {
            if (costs[i] == "square_class_error" || costs[i] == "linear_class_error" || costs[i] == "class_error") {
                find_threshold = i;
                break;
            }
        }
        PLASSERT_MSG(-1 != find_threshold, "We were asked to find the class "
                "threshold, but no *class_error cost is selected.\n"
                "The first *class_error cost is used to select the threshold.");
    }
    inherited::train();

    if (-1 != find_threshold) {

        Vec input;
        Vec target;
        Vec output;
        Vec outcosts;
        real weight;
        output.resize(learner_->outputsize());
        outcosts.resize(learner_->nTestCosts());
        class_threshold.resize(n_classes);
        Vec test_threshold;
        Vec best_threshold;
        test_threshold.resize(n_classes);
        best_threshold.resize(n_classes);
        double best_class_error = -1;
        int costs_index = -1;
        TVec<string> costsnames = getTestCostNames();
        Vec paramtotry;
        for (float f = 0; f < 3; f += 0.1)
            paramtotry.append(f);

        // Find the index of the cost to use.
        for (int i = 0; i < costsnames.size(); i++) {
            string str1 = costsnames[i];
            string str2 = costs[find_threshold];
            if (str1 == str2) {
                costs_index = i;
                break;
            }
        }

        for (int a = 0; a < paramtotry.size(); a++) {
            for (int b = a + 1; b < paramtotry.size(); b++) {
                test_threshold[0] = paramtotry[a];
                test_threshold[1] = paramtotry[b];
                double cum_class_error = 0;
                for (int i = 0; i < train_set->length(); i++) {
                    learner_->getTrainingSet().getExample(i, input, target, weight);
                    computeOutputAndCosts(input, target, output, outcosts);
                    cum_class_error += outcosts[costs_index];
                }
                if (best_class_error == -1 || best_class_error > cum_class_error) {
                    best_threshold << test_threshold;
                    best_class_error = cum_class_error;
                }
            }
        }
        class_threshold << best_threshold;
        if (verbosity >= 2)
            for (int i = 0; i < class_threshold.size(); i++)
                cout << "class_threshold[" << i << "] = " << class_threshold[i] << endl;

    }
    Profiler::end("AddCostToLearner::train");
    if (train_time_b) {
        const Profiler::Stats& stats = Profiler::getStats("AddCostToLearner::train");
        real tmp = stats.wall_duration / Profiler::ticksPerSecond();
        train_time = tmp - total_train_time;
        total_train_time = tmp;
    }
    if (test_time_b) {
        // We measure the test time here because we want it over all datasets;
        // if we measured it in the test() function, we would only have it for
        // one dataset.
        const Profiler::Stats& stats_test = Profiler::getStats("AddCostToLearner::test");
        real tmp = stats_test.wall_duration / Profiler::ticksPerSecond();
        test_time = tmp - total_test_time;
        total_test_time = tmp;
    }
}
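
// Example of the timing bookkeeping above: train_time is the wall time of
// the last train() call, recovered as a difference of cumulative totals.
// If the first call takes 5s and the second 3s, then after the second call
// total_train_time = 8 and train_time = 8 - 5 = 3.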

// test //
void AddCostToLearner::test(VMat testset, PP<VecStatsCollector> test_stats,
                            VMat testoutputs, VMat testcosts) const
{
    Profiler::start("AddCostToLearner::test");
    inherited::test(testset, test_stats, testoutputs, testcosts);
    Profiler::end("AddCostToLearner::test");
}

// computeOutputAndCosts //
void AddCostToLearner::computeOutputAndCosts(const Vec& input, const Vec& target,
                                             Vec& output, Vec& costs) const {
    PLASSERT( learner_ );
    // Done this way to use a possibly optimized version of
    // computeOutputAndCosts from the sub-learner, as with NatGradNNet.

    Vec sub_costs = costs.subVec(0, learner_->nTestCosts());
    learner_->computeOutputAndCosts(input, target, output, sub_costs);
    computeCostsFromOutputs(input, output, target, costs, false);
}

// computeOutputsAndCosts //
void AddCostToLearner::computeOutputsAndCosts(const Mat& input, const Mat& target,
                                             Mat& output, Mat& costs) const
{
    PLASSERT( learner_ );
    // Done this way to use a possibly optimized version of
    // computeOutputsAndCosts from the sub-learner, as with NatGradNNet
    // with a minibatch_size > 1.
    Mat sub_costs = costs.subMatColumns(0, learner_->nTestCosts());
    learner_->computeOutputsAndCosts(input, target, output, sub_costs);
    for (int i = 0; i < input.length(); i++)
    {
        Vec in_i = input(i);
        Vec out_i = output(i);
        Vec target_i = target(i);
        Vec c_i = costs(i);
        computeCostsFromOutputs(in_i, out_i, target_i, c_i, false);
    }
}

// forget //
void AddCostToLearner::forget()
{
    inherited::forget();
    bag_size = 0;
}

// getTestCostNames //
TVec<string> AddCostToLearner::getTestCostNames() const
{
    TVec<string> sub_costs = learner_->getTestCostNames();
    for (int i = 0; i < this->costs.length(); i++) {
        if (costs[i] == "confusion_matrix")
            for (int conf_i = 0; conf_i < n_classes; conf_i++)
                for (int conf_j = 0; conf_j < n_classes; conf_j++) {
                    string s = "confusion_matrix_target" + tostring(conf_i) + "_pred" + tostring(conf_j);
                    sub_costs.append(s);
                }
        else
            sub_costs.append(costs[i]);
    }
    return sub_costs;
}

// getTrainCostNames //
TVec<string> AddCostToLearner::getTrainCostNames() const
{
    // The added costs are only test costs (so far).
    return learner_->getTrainCostNames();
}

#ifdef __INTEL_COMPILER
#pragma warning(disable:1419)  // Get rid of compiler warning.
#endif
extern void varDeepCopyField(Var& field, CopiesMap& copies);
#ifdef __INTEL_COMPILER
#pragma warning(default:1419)
#endif

// makeDeepCopyFromShallowCopy //
void AddCostToLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(combined_output,      copies);
    deepCopyField(bag_outputs,          copies);
    deepCopyField(cross_entropy_prop,   copies);
    varDeepCopyField(cross_entropy_var, copies);
    deepCopyField(desired_target,       copies);
    varDeepCopyField(output_var,        copies);
    deepCopyField(sub_learner_output,   copies);
    deepCopyField(sub_input,            copies);
    varDeepCopyField(target_var,        copies);
    deepCopyField(class_threshold,      copies);
    deepCopyField(costs,                copies);
}

// setTrainingSet //
void AddCostToLearner::setTrainingSet(VMat training_set, bool call_forget) {
    bool training_set_has_changed = !train_set || !(train_set->looksTheSameAs(training_set));
    if (compute_costs_on_bags) {
        // We need to remove the bag information (assumed to be in the last column
        // of the target) when giving the training set to the sub-learner.
        // TODO Write a SubTargetVMatrix to make it easier.
        if (training_set->inputsize() < 0 || training_set->targetsize() < 0) {
            PLERROR("In AddCostToLearner::setTrainingSet - The inputsize and/or targetsize of the training set isn't specified");
        }
        VMat sub_training_set;
        if (training_set->weightsize() > 0) {
            sub_training_set = new ConcatColumnsVMatrix(
                new SubVMatrix(training_set, 0, 0, training_set->length(), training_set->inputsize() + training_set->targetsize() - 1),
                new SubVMatrix(training_set, 0, training_set->inputsize() + training_set->targetsize(), training_set->length(), training_set->weightsize())
                );
        } else {
            sub_training_set = new SubVMatrix(training_set, 0, 0, training_set->length(), training_set->width() - 1);
        }
        sub_training_set->defineSizes(training_set->inputsize(), training_set->targetsize() - 1, training_set->weightsize());
        learner_->setTrainingSet(sub_training_set, false);
        // 'call_forget' is set to false for the same reason as in EmbeddedLearner.
        if (call_forget && !training_set_has_changed)
            learner_->build(); // See EmbeddedLearner comments.
    } else {
        learner_->setTrainingSet(training_set, false);
        if (call_forget && !training_set_has_changed)
            learner_->build(); // See EmbeddedLearner comments.
    }
    PLearner::setTrainingSet(training_set, call_forget);
}
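
// Worked example of the size bookkeeping above: with compute_costs_on_bags
// set to 1 and a training set of sizes (inputsize = 5, targetsize = 2,
// weightsize = 1), where the second target column is the bag signal, the
// sub-learner is given a view with sizes (inputsize = 5, targetsize = 1,
// weightsize = 1): the bag column is cut out and the weight column
// re-appended after it.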

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :