PLearn 0.1 - RankLearner.cc
// -*- C++ -*-

// RankLearner.cc
//
// Copyright (C) 2004 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: RankLearner.cc 9192 2008-07-02 16:48:44Z nouiz $
 ******************************************************* */

// Authors: Olivier Delalleau

#include "RankLearner.h"

namespace PLearn {
using namespace std;

/////////////////
// RankLearner //
/////////////////
RankLearner::RankLearner()
{}

PLEARN_IMPLEMENT_OBJECT(RankLearner,
    "Trains another learner to predict the rank of the target, instead of its value.",
    "The targets of the training set are sorted by increasing value, and the\n"
    "underlying learner is trained to predict the ranks.\n"
    "The output of this learner is an interpolation from the targets of the\n"
    "training set, given the output (predicted rank) of the sub-learner. A\n"
    "linear interpolation between the two closest targets is used, and the\n"
    "output is bounded by the lowest and highest targets in the training set.\n"
    "\n"
    "The costs computed are those of the sub-learner, and they are preceded\n"
    "with the 'learner.' prefix. For instance, if the sub-learner computes the\n"
    "'mse' cost, this learner will rename it into 'learner.mse'.\n"
);

////////////////////
// declareOptions //
////////////////////
void RankLearner::declareOptions(OptionList& ol)
{
    // Build options.

    // declareOption(ol, "myoption", &RankLearner::myoption, OptionBase::buildoption,
    //               "Help text describing this option");
    // ...

    // Learnt options.

    declareOption(ol, "sorted_targets", &RankLearner::sorted_targets, OptionBase::learntoption,
                  "The sorted targets of the training set.");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void RankLearner::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void RankLearner::build_()
{
    if (learner_ && learner_->outputsize() >= 0) {
        learner_output.resize(learner_->outputsize());
    }
    // The sub-learner's target is a rank, thus of dimension 1.
    learner_target.resize(1);
    // Currently, only works with 1-dimensional targets.
    last_output.resize(1);
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void RankLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                          const Vec& target, Vec& costs) const
{
    static real desired_rank, val, frac;
    static int n, left, right, mid;
    // Find the desired rank.
    val = target[0];
    n = sorted_targets.length();
    if (val <= sorted_targets[0])
        // Lower than or equal to the lowest target.
        desired_rank = 0;
    else if (val >= sorted_targets[n - 1])
        // Higher than or equal to the highest target.
        desired_rank = n - 1;
    else {
        // Look for the two closest targets by binary search.
        left = 0;
        right = n - 1;
        while (right > left + 1) {
            mid = (left + right) / 2;
            if (val < sorted_targets[mid])
                right = mid;
            else
                left = mid;
        }
        if (right == left) {
            if (left == n - 1)
                left--;
            else
                right++;
        }
        frac = sorted_targets[right] - sorted_targets[left];
        if (frac < 1e-30)
            // Equal targets, up to numerical precision.
            desired_rank = left;
        else
            desired_rank = left + (val - sorted_targets[left]) / frac;
    }
    learner_target[0] = desired_rank;
    if (!fast_exact_is_equal(last_output[0], output[0]))
        // This case is not handled yet.
        PLERROR("In RankLearner::computeCostsFromOutputs - Currently, one can only use computeCostsFromOutputs() "
                "after calling computeOutput.");
    // In this case, the sub-learner's output is the last one computed in computeOutput().
    learner_->computeCostsFromOutputs(input, learner_output, learner_target, costs);
}

///////////////////
// computeOutput //
///////////////////
void RankLearner::computeOutput(const Vec& input, Vec& output) const
{
    static real val;
    static int rank_inf;
    learner_->computeOutput(input, learner_output);
#ifdef BOUNDCHECK
    // Safety check to ensure we are only working with 1-dimensional targets.
    if (learner_output.length() != 1)
        PLERROR("In RankLearner::computeOutput - Ranking can only work with 1-dimensional targets");
#endif
    val = learner_output[0];
    if (val <= 0)
        output[0] = sorted_targets[0];
    else if (val >= sorted_targets.length() - 1)
        output[0] = sorted_targets[sorted_targets.length() - 1];
    else {
        rank_inf = int(val);
        output[0] = sorted_targets[rank_inf]
            + (val - rank_inf) * (sorted_targets[rank_inf + 1] - sorted_targets[rank_inf]);
    }
    last_output[0] = output[0];
}

///////////////////////////
// computeOutputAndCosts //
///////////////////////////
void RankLearner::computeOutputAndCosts(const Vec& input, const Vec& target,
                                        Vec& output, Vec& costs) const {
    // TODO Optimize to take advantage of the sub-learner's method.
    PLearner::computeOutputAndCosts(input, target, output, costs);
}

////////////
// forget //
////////////
void RankLearner::forget()
{
    inherited::forget();
    sorted_targets.resize(0);
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> RankLearner::getTestCostNames() const
{
    // Add 'learner.' in front of the sub-learner's costs.
    TVec<string> learner_costs = learner_->getTestCostNames();
    TVec<string> costs(learner_costs.length());
    for (int i = 0; i < costs.length(); i++)
        costs[i] = "learner." + learner_costs[i];
    return costs;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> RankLearner::getTrainCostNames() const
{
    // Add 'learner.' in front of the sub-learner's costs.
    TVec<string> learner_costs = learner_->getTrainCostNames();
    TVec<string> costs(learner_costs.length());
    for (int i = 0; i < costs.length(); i++)
        costs[i] = "learner." + learner_costs[i];
    return costs;
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void RankLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(sorted_targets, copies);
    deepCopyField(last_output, copies);
    deepCopyField(learner_output, copies);
    deepCopyField(learner_target, copies);
    deepCopyField(ranked_trainset, copies);
}

////////////////
// outputsize //
////////////////
int RankLearner::outputsize() const
{
    // The outputsize is the usual outputsize (the one from the training set).
    // Currently this can only be 1, because we only deal with real targets
    // (they are easier to sort).
    return 1;
}

////////////////////
// setTrainingSet //
////////////////////
void RankLearner::setTrainingSet(VMat training_set, bool call_forget) {
    // Logic similar to EmbeddedLearner's.
    bool training_set_has_changed = !train_set || !(train_set->looksTheSameAs(training_set));
    ranked_trainset = new RankedVMatrix(training_set);
    learner_->setTrainingSet((RankedVMatrix *) ranked_trainset, false);
    if (call_forget && !training_set_has_changed)
        learner_->build();
    // Resize work variable.
    if (learner_->outputsize() >= 0)
        learner_output.resize(learner_->outputsize());
    PLearner::setTrainingSet(training_set, call_forget);
}

///////////
// train //
///////////
void RankLearner::train() {
    // Remember the sorted targets, because we will need them for prediction.
    Mat mat_sorted_targets = ranked_trainset->getSortedTargets().column(0);
    sorted_targets.resize(mat_sorted_targets.length());
    sorted_targets << mat_sorted_targets;
    inherited::train();
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
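
The mapping described in the class help text above, between sorted training targets and (possibly fractional) ranks, can be illustrated outside of PLearn. The following is a minimal, self-contained sketch of the interpolation done in computeOutput() and of the desired-rank computation done in computeCostsFromOutputs(); the names rank_to_value, value_to_rank, and the example targets are chosen for illustration only and are not part of the PLearn API (the original code uses an explicit binary search rather than std::upper_bound, but the result is the same fractional rank).

#include <algorithm>
#include <iostream>
#include <vector>

// Map a predicted (possibly fractional) rank back to a target value by
// linear interpolation between the two surrounding sorted targets,
// clamping to the lowest / highest target (mirrors computeOutput()).
double rank_to_value(const std::vector<double>& sorted_targets, double rank)
{
    int n = static_cast<int>(sorted_targets.size());
    if (rank <= 0)
        return sorted_targets.front();
    if (rank >= n - 1)
        return sorted_targets.back();
    int i = static_cast<int>(rank);
    return sorted_targets[i] + (rank - i) * (sorted_targets[i + 1] - sorted_targets[i]);
}

// Map a target value to its desired fractional rank among the sorted
// training targets (mirrors computeCostsFromOutputs()).
double value_to_rank(const std::vector<double>& sorted_targets, double val)
{
    int n = static_cast<int>(sorted_targets.size());
    if (val <= sorted_targets.front())
        return 0;
    if (val >= sorted_targets.back())
        return n - 1;
    // Index of the first target strictly greater than val.
    int right = static_cast<int>(
        std::upper_bound(sorted_targets.begin(), sorted_targets.end(), val)
        - sorted_targets.begin());
    int left = right - 1;
    double gap = sorted_targets[right] - sorted_targets[left];
    if (gap < 1e-30)
        return left;  // Equal targets, up to numerical precision.
    return left + (val - sorted_targets[left]) / gap;
}

int main()
{
    // Hypothetical training targets, already sorted by increasing value.
    std::vector<double> sorted_targets = {1.0, 2.0, 4.0, 8.0};

    // A sub-learner predicting rank 1.5 yields a value halfway between
    // the targets at ranks 1 and 2, i.e. between 2.0 and 4.0.
    std::cout << rank_to_value(sorted_targets, 1.5) << std::endl;  // prints 3

    // Conversely, the desired rank for the target value 3.0 is 1.5.
    std::cout << value_to_rank(sorted_targets, 3.0) << std::endl;  // prints 1.5
    return 0;
}

Within PLearn itself none of this has to be done by hand: setTrainingSet() wraps the training VMat in a RankedVMatrix so the sub-learner sees ranks as targets, and train() stores the sorted targets that computeOutput() later interpolates between.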