// -*- C++ -*-

// BinaryStump.cc
//
// Copyright (C) 2004 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: BinaryStump.cc 6846 2007-04-06 13:38:36Z tihocan $
 ******************************************************* */

// Authors: Hugo Larochelle

#include "BinaryStump.h"

#define PL_LOG_MODULE_NAME "BinaryStump"
#include <plearn/io/pl_log.h>

namespace PLearn {
using namespace std;


// Non-recursive quicksort of a vector of (index, value) pairs, sorted in
// place by value. 'buffer' is used as an explicit stack of (start, length)
// sub-ranges still to be partitioned; it must be large enough to hold all
// pending ranges.
void qsort_vec(TVec< pair<int, real> > v, TVec< pair<int,int> > buffer)
{
    TVec< pair<int,real> > temp(v.length());
    temp << v;
    real pivot = temp[0].second;
    int first = 0;
    int last = v.length()-1;
    for(int i=1; i<v.length(); i++)
        if(temp[i].second >= pivot)
            v[last--] = temp[i];
        else
            v[first++] = temp[i];

    if(first != last)
        PLERROR("In qsort_vec - Partition failed: 'first' and 'last' should meet");

    v[first] = temp[0];

    int it = 0;
    pair<int,int> inf_sup;
    if(first != 0)
    {
        inf_sup.first = 0;
        inf_sup.second = first;
        buffer[it] = inf_sup;
        it++;
    }
    if(last != temp.length()-1)
    {
        inf_sup.first = last+1;
        inf_sup.second = v.length()-1-last;
        buffer[it] = inf_sup;
        it++;
    }

    while(it > 0)
    {
        it--;
        temp.resize(buffer[it].second);
        temp << v.subVec(buffer[it].first, buffer[it].second);
        pivot = temp[0].second;
        first = buffer[it].first;
        last = buffer[it].first + buffer[it].second - 1;
        for(int i=1; i<buffer[it].second; i++)
            if(temp[i].second >= pivot)
                v[last--] = temp[i];
            else
                v[first++] = temp[i];

        if(first != last)
            PLERROR("In qsort_vec - Partition failed: 'first' and 'last' should meet");

        v[first] = temp[0];

        int this_it = it;

        if(first != buffer[this_it].first)
        {
            inf_sup.first = buffer[this_it].first;
            inf_sup.second = first - buffer[this_it].first;
            buffer[it] = inf_sup;
            it++;
        }
        if(last != buffer[this_it].first + temp.length() - 1)
        {
            inf_sup.first = last+1;
            inf_sup.second = buffer[this_it].first + temp.length() - 1 - last;
            buffer[it] = inf_sup;
            it++;
        }
    }

}

/////////////////
// BinaryStump //
/////////////////
BinaryStump::BinaryStump():
    feature(0),
    tag(0),
    threshold(0),
    one_hot_output(false)
{}

PLEARN_IMPLEMENT_OBJECT(BinaryStump, "Binary stump classifier",
                        "This algorithm finds the most accurate binary stump\n"
                        "that classifies to a certain tag (0 or 1)\n"
                        "every point that has a certain feature (coordinate)\n"
                        "higher than a learned threshold.\n"
                        "The tag, feature and threshold are chosen to minimize\n"
                        "the weighted classification error.\n"
                        "Only the first target is considered; the others are\n"
                        "ignored.\n");

////////////////////
// declareOptions //
////////////////////
void BinaryStump::declareOptions(OptionList& ol)
{
    declareOption(ol, "one_hot_output", &BinaryStump::one_hot_output,
                  OptionBase::buildoption,
                  "If set to 1, the output will be a two-dimensional one-hot vector\n"
                  "instead of just a single number.");

    declareOption(ol, "feature", &BinaryStump::feature, OptionBase::learntoption,
                  "Feature tested by the stump");

    declareOption(ol, "threshold", &BinaryStump::threshold,
                  OptionBase::learntoption,
                  "Threshold for decision");

    declareOption(ol, "tag", &BinaryStump::tag, OptionBase::learntoption,
                  "Tag assigned when feature is lower than the threshold");

    inherited::declareOptions(ol);
}

void BinaryStump::build_() {}

void BinaryStump::build()
{
    inherited::build();
    build_();
}


/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void BinaryStump::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}


////////////////
// outputsize //
////////////////
int BinaryStump::outputsize() const
{
    if (one_hot_output)
        return 2;
    else
        return 1;
}

////////////
// forget //
////////////
void BinaryStump::forget()
{
    stage = 0;
    feature = 0;
    tag = 0;
    threshold = 0;
}

///////////
// train //
///////////
void BinaryStump::train()
{
    if(!train_set)
        PLERROR("In BinaryStump::train() : train_set not specified");

    if(!train_stats) // make a default stats collector, in case there's none
        train_stats = new VecStatsCollector();
    train_stats->forget();

    int n = train_set->length();
    sf.resize(n);
    //static Vec input; input.resize(inputsize());
    //static Vec target; target.resize(targetsize());
    real input;
    //real weight;
    Vec train_target(n);
    TVec< pair<int,int> > buffer((int)(n*safeflog(n)));

    static Vec example_weights; example_weights.resize(n);

    // Extracting weights
    if(train_set->weightsize() > 0)
    {
        for (int i=0; i<n; ++i)
        {
            //train_set->getExample(i, input, target, weight);
            //example_weights[i]=weight;
            example_weights[i] = train_set->get(i, inputsize_+targetsize_);
        }
    }
    else
    {
        example_weights.fill(1.0/n);
    }

    // Extracting targets and checking that they are binary
    for (int i=0; i<n; ++i)
    {
        train_target[i] = train_set->get(i, inputsize_);
        if(!fast_exact_is_equal(train_target[i], 0) &&
           !fast_exact_is_equal(train_target[i], 1))
            PLERROR("In BinaryStump::train() : target should be either 1 or 0");
    }

    // Choosing the best stump

    real best_error = 0;

    {
        real w_sum_1 = 0;
        real w_sum_error = 0;
        real w_sum = 0;

        for(int i=0; i<n; i++)
        {
            w_sum += example_weights[i];
            //train_set->getExample(i,input,target,weight);
            //if(target[0] == 1)
            if(fast_exact_is_equal(train_target[i], 1))
                w_sum_1 += example_weights[i];
        }

        if(w_sum_1 > w_sum - w_sum_1)
        {
            tag = 0;
            w_sum_error = w_sum - w_sum_1;
        }
        else
        {
            tag = 1;
            w_sum_error = w_sum_1;
        }

        best_error = w_sum_error;

        // The first stump considered classifies in the most frequent class
        // every point whose first coordinate is greater than the smallest
        // value of this coordinate in the training set, minus one. This
        // approximately amounts to classifying every point in the most
        // frequent class.

        feature = 0;
        threshold = sf[0].second - 1; // TODO Why? (done below already?)
        PP<ProgressBar> pb;
        if(report_progress)
            pb = new ProgressBar("Finding best stump", inputsize()*sf.length());
        int prog = 0;
        for(int d=0; d<inputsize(); d++)
        {
            // Copying input
            for(int j=0; j<n; j++)
            {
                //train_set->getExample(j,input, target, weight);
                input = train_set->get(j, d);
                //if(target[0] != 0 & target[0] != 1)
                sf[j].first = j;
                //sf[j].second = input[d];
                sf[j].second = input;
            }

            // Sorting features
            //for(int i=0; i<sf.length();i++)
            qsort_vec(sf, buffer);

            if(d==0) { // initialize threshold
                threshold = sf[0].second - 1;
                DBG_MODULE_LOG << "Initializing threshold <- " << threshold <<
                    endl;
            }

            real w_sum_l_1 = 0;
            real w_sum_l = 0;

            for(int i=0; i<sf.length()-1; i++)
            {
                real f1 = sf[i].second;
                real f2 = sf[i+1].second;

                //train_set->getExample(sf[i].first,input,target,weight);
                //target = train_set->getExample(sf[i].first,inputsize_);
                //real classe = target[0];
                real classe = train_target[sf[i].first];
                if(fast_exact_is_equal(classe, 1))
                    w_sum_l_1 += example_weights[sf[i].first];
                w_sum_l += example_weights[sf[i].first];

                if(fast_exact_is_equal(f1, f2))
                    continue;

                // Weighted error of the stump that assigns tag 1 below the
                // candidate threshold (f1+f2)/2 and tag 0 above it.
                real w_sum_error_1 = w_sum_l - w_sum_l_1 + w_sum_1 - w_sum_l_1;
                real c_w_sum_error = 0;
                if(w_sum_error_1 > w_sum - w_sum_error_1)
                {
                    c_w_sum_error = w_sum - w_sum_error_1;
                }
                else
                {
                    c_w_sum_error = w_sum_error_1;
                }

                // We choose the first stump that minimizes the
                // weighted error.
                if (best_error > c_w_sum_error)
                {
                    best_error = c_w_sum_error;
                    tag = w_sum_error_1 > w_sum - w_sum_error_1 ? 0 : 1;
                    threshold = (f1+f2)/2;
                    DBG_MODULE_LOG << "Updating threshold <- " << threshold <<
                        " (c_w_sum_error = " << c_w_sum_error <<
                        ", best_error = " << best_error << ")" << endl;

                    feature = d;
                } else {
                    DBG_MODULE_LOG << "No update (c_w_sum_error = " <<
                        c_w_sum_error << ", best_error = " << best_error << ")"
                        << endl;
                }
            }
            prog++;
            if(report_progress) pb->update(prog);
        }
    }

    Vec costs(1); costs[0] = best_error;
    train_stats->update(costs);
    train_stats->finalize();
    if(verbosity > 1)
        cout << "Weighted error = " << best_error << endl;
    sf = TVec< pair<int, real> >(0);
}


///////////////////
// computeOutput //
///////////////////
void BinaryStump::computeOutput(const Vec& input, Vec& output) const
{
    output.resize(outputsize());
    int predict = input[feature] < threshold ? tag : 1 - tag;
    if (one_hot_output) {
        output[predict] = 1;
        output[1 - predict] = 0;
    } else
        output[0] = predict;
}

//////////////////////////////
// computeCostsFromOutputs //
//////////////////////////////
void BinaryStump::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                          const Vec& target, Vec& costs) const
{
    costs.resize(1);

    if(!fast_exact_is_equal(target[0], 0) &&
       !fast_exact_is_equal(target[0], 1))
        PLERROR("In BinaryStump::computeCostsFromOutputs() : "
                "target should be either 1 or 0");

    // In one-hot mode the predicted class is the index holding the 1
    // (see computeOutput), hence argmax.
    real predict = one_hot_output ? argmax(output) : output[0];
    costs[0] = !is_equal(predict, target[0]);
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> BinaryStump::getTestCostNames() const
{
    return getTrainCostNames();
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> BinaryStump::getTrainCostNames() const
{
    static TVec<string> costs;
    if (costs.isEmpty())
        costs.append("binary_class_error");
    return costs;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
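
/*
  Illustrative example of the decision rule in computeOutput(), using
  hypothetical learnt values (feature = 3, threshold = 0.5, tag = 1):

      input[3] = 0.2  -->  0.2 <  0.5, prediction = tag     = 1
      input[3] = 0.7  -->  0.7 >= 0.5, prediction = 1 - tag = 0

  When one_hot_output is set, the same predictions are returned as the
  two-dimensional vectors (0, 1) and (1, 0) respectively.
*/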