// -*- C++ -*-

// UniformizeLearner.cc
//
// Copyright (C) 2004 ApSTAT Technologies Inc.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: UniformizeLearner.cc 10248 2009-06-09 18:16:11Z nouiz $
 ******************************************************* */

// Authors: Pascal Vincent
#include "UniformizeLearner.h"

namespace PLearn {
using namespace std;

UniformizeLearner::UniformizeLearner()
    :weight_field_index(-1),
     nquantiles(200),
     raw_inputs_as_output(false)
{
}

PLEARN_IMPLEMENT_OBJECT(UniformizeLearner, "Uniformizes selected input fields",
                        "For each specified field, the full training set column is read and sorted,\n"
                        "and up to nquantiles of the sorted values are stored along with their [0,1]\n"
                        "rank (as well as the min and max). Uniformization then maps a value to [0,1]\n"
                        "by piecewise-linear interpolation between the remembered quantiles.\n"
                        "Missing values are supported: they are not mapped and stay missing in the output.");
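
// A minimal usage sketch (an illustration, not from the original source;
// 'trainvm' is an assumed VMat whose inputsize and weightsize are already
// set, and the build options are accessed directly as public members, as is
// conventional for PLearn options):
//
//     UniformizeLearner learner;
//     learner.which_fieldnames.append("age"); // hypothetical field name
//     learner.raw_inputs_as_output = true;    // also pass raw inputs through
//     learner.build();
//     learner.setTrainingSet(trainvm, true);  // resolves field indexes
//     learner.train();                        // builds the value-to-rank maps
//
//     Vec input(trainvm->inputsize());
//     Vec output(learner.outputsize());
//     learner.computeOutput(input, output);   // [uniformized..., raw inputs...]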

void UniformizeLearner::declareOptions(OptionList& ol)
{
    // Build options

    declareOption(ol, "which_fieldnames", &UniformizeLearner::which_fieldnames, OptionBase::buildoption,
                  "The names of the fields to uniformize.\n"
                  "If both which_fieldnames and which_fieldnums are empty, all fields are uniformized.");

    declareOption(ol, "which_fieldnums", &UniformizeLearner::which_fieldnums, OptionBase::buildoption,
                  "The indexes of the fields to uniformize. Leave this option empty if you specify which_fieldnames.\n"
                  "If both which_fieldnames and which_fieldnums are empty, all fields are uniformized.");

    declareOption(ol, "nquantiles", &UniformizeLearner::nquantiles, OptionBase::buildoption,
                  "How many intervals to use to divide the sorted values.");

    declareOption(ol, "raw_inputs_as_output", &UniformizeLearner::raw_inputs_as_output, OptionBase::buildoption,
                  "If true, the raw inputs are appended to the uniformized outputs.");

    // Learnt options

    declareOption(ol, "val_to_rank", &UniformizeLearner::val_to_rank, OptionBase::learntoption,
                  "Remembers the mapping between a few values and their [0,1] ranking.");

    declareOption(ol, "input_field_names", &UniformizeLearner::input_field_names, OptionBase::learntoption,
                  "Remembers the names of the input fields.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void UniformizeLearner::build_()
{
}

// Nothing to add here, simply calls build_().
void UniformizeLearner::build()
{
    inherited::build();
    build_();
}


void UniformizeLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(val_to_rank,       copies);
    deepCopyField(which_fieldnames,  copies);
    deepCopyField(which_fieldnums,   copies);
    deepCopyField(input_field_names, copies);
}


////////////////
// outputsize //
////////////////
int UniformizeLearner::outputsize() const
{
    // The output size is inputsize(), plus one extra uniformized value per
    // selected field when raw_inputs_as_output is true.
    int nk= 0;
    if(raw_inputs_as_output)
    {
        nk= which_fieldnames.length();
        if(nk == 0)
            nk= which_fieldnums.size();
        if(nk == 0) // no field specified: uniformize all
            nk= inputsize();
    }
    return nk+inputsize();
}
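
// Example (illustrative): with inputsize() == 5, two fields selected and
// raw_inputs_as_output == true, outputsize() is 2 + 5 == 7; with
// raw_inputs_as_output == false it is simply inputsize() == 5.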

////////////
// forget //
////////////
void UniformizeLearner::forget()
{
    stage = 0; // untrained
}

////////////////////
// setTrainingSet //
////////////////////
void UniformizeLearner::setTrainingSet(VMat training_set, bool call_forget)
{
    inherited::setTrainingSet(training_set, call_forget);
    VMat dataset = getTrainingSet();

    if(dataset->weightsize() > 1)
        PLERROR("In UniformizeLearner::setTrainingSet: Only one weight supported.");

    if(dataset->weightsize() > 0)
        weight_field_index= dataset->fieldIndex(dataset->weightFieldNames()[0]);

    input_field_names.resize(dataset->inputsize());
    input_field_names << dataset->inputFieldNames();

    // Resolve field names into field indexes.
    int nk = which_fieldnames.length();
    if(nk==0)
        nk = which_fieldnums.size();
    else
    {
        which_fieldnums.resize(nk);
        for(int k=0; k<nk; k++)
            which_fieldnums[k] = dataset->getFieldIndex(which_fieldnames[k]);
    }

    if(nk == 0) // no field specified: uniformize all
    {
        nk= dataset->inputsize();
        which_fieldnums.resize(nk);
        for(int k= 0; k < nk; ++k)
            which_fieldnums[k]= k;
    }
}

///////////
// train //
///////////
void UniformizeLearner::train()
{
    // Training computes, for each selected field, a compact map from a few
    // of the field's values to their [0,1] rank in the training set.
    if(stage==0) // untrained
    {
        int nk = which_fieldnames.length();
        if(nk==0)
            nk = which_fieldnums.size();
        else
        {
            which_fieldnums.resize(nk);
            for(int k=0; k<nk; k++)
                which_fieldnums[k] = train_set->getFieldIndex(which_fieldnames[k]);
        }

        if(nk == 0) // no field specified: uniformize all
        {
            nk= train_set->inputsize();
            which_fieldnums.resize(nk);
            for(int k= 0; k < nk; ++k)
                which_fieldnums[k]= k;
        }

        int l = train_set->length();

        bool weighted= train_set->weightsize() == 1;

        // static so we don't reallocate memory on each call
        static Vec colw;
        if(weighted)
        {
            colw.resize(l);
            train_set->getColumn(weight_field_index, colw);
        }

        static Vec colv;
        colv.resize(l);

        val_to_rank.resize(nk);
        for(int k=0; k<nk; k++)
        {
            train_set->getColumn(which_fieldnums[k],colv);
            if(weighted)
                computeWeightedRankMap(colv, nquantiles, val_to_rank[k], colw);
            else
                computeRankMap(colv, nquantiles, val_to_rank[k]);
        }
        stage = 1; // trained
    }
}

//////////////////
// v_no_missing //
//////////////////
Vec UniformizeLearner::v_no_missing;

////////////////////
// computeRankMap //
////////////////////
void UniformizeLearner::computeRankMap(const Vec& v, int nquantiles,
                                       map<real,real>& rankmap)
{
    v_no_missing.resize(v.length()); // Allocate enough memory.
    if (!v.hasMissing())
        v_no_missing << v;
    else {
        // Copy only the non-missing values.
        v_no_missing.resize(0);
        for (int i = 0; i < v.length(); i++)
            if (!is_missing(v[i]))
                v_no_missing.append(v[i]);
    }
    rankmap.clear();
    int max_index = v_no_missing.length() - 1;
    sortElements(v_no_missing);
    // The extreme values get ranks 0 and 1; up to nquantiles-1 intermediate
    // values are stored with their rank.
    rankmap[v_no_missing[0]] = 0;
    rankmap[v_no_missing[max_index]] = 1;
    for(int k=1; k<nquantiles; k++)
    {
        real rank = real(k)/real(nquantiles);
        int pos = int(round(rank * max_index));
        real val = v_no_missing[pos];
        if(rankmap.find(val) == rankmap.end())
            rankmap[val] = rank;
    }
}
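
// Worked example (illustrative, not from the original source): for
// v = [3, 1, 4, 1, 5] and nquantiles = 2, the sorted values are
// [1, 1, 3, 4, 5] (max_index = 4). The endpoints give 1 -> 0 and 5 -> 1,
// and k = 1 adds v_no_missing[round(0.5 * 4)] = 3 -> 0.5, yielding the
// rank map { 1: 0, 3: 0.5, 5: 1 }.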

////////////////////////////
// computeWeightedRankMap //
////////////////////////////
void UniformizeLearner::computeWeightedRankMap(const Vec& v, int nquantiles, map<real,real>& rankmap, const Vec& weights)
{
    int l= v.length();

    // Gather (value, weight) pairs, skipping missing values.
    Mat vw(0, 2);
    if (!v.hasMissing())
    {
        vw.resize(l,2);
        vw.column(0) << v;
        vw.column(1) << weights;
    }
    else
    {
        Vec vvw(2);
        for (int i = 0; i < l; i++)
            if (!is_missing(v[i]))
            {
                vvw[0]= v[i];
                vvw[1]= weights[i];
                vw.appendRow(vvw);
            }
    }

    rankmap.clear();
    int max_index = vw.length() - 1;

    // Sort the pairs by value, then turn the weight column into cumulative
    // weights.
    sortRows(vw, TVec<int>(1,0));
    for (int i = 1; i <= max_index; i++)
        vw(i,1)+= vw(i-1,1);

    rankmap[vw(0,0)] = 0;
    rankmap[vw(max_index,0)] = 1;
    real totw= vw(max_index,1);

    // Up to nquantiles-1 intermediate values are stored with their weighted
    // rank, i.e. the fraction of the total weight at or below them.
    for(int k=1, i= 0; k<nquantiles; ++k)
    {
        real rank = real(k)/real(nquantiles);
        real qw= totw*rank;
        while(vw(i,1) < qw)
            ++i;

        real val = vw(i,0);
        rank= vw(i,1)/totw;

        if(rankmap.find(val) == rankmap.end())
            rankmap[val]= rank;
    }
}
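
// Worked example (illustrative): for v = [1, 2, 3, 4],
// weights = [1, 1, 1, 3] and nquantiles = 2, the cumulative weights after
// sorting are [1, 2, 3, 6] (totw = 6). The endpoints give 1 -> 0 and
// 4 -> 1; k = 1 seeks the first cumulative weight >= 0.5 * 6 = 3, which is
// value 3 with weighted rank 3/6 = 0.5, yielding { 1: 0, 3: 0.5, 4: 1 }.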

///////////////
// mapToRank //
///////////////
real UniformizeLearner::mapToRank(real val, const map<real,real>& rankmap)
{
    PLASSERT( !is_missing(val) );
    real minv = rankmap.begin()->first;
    if(val<=minv)
        return 0;
    real maxv = rankmap.rbegin()->first;
    if(val>=maxv)
        return 1;
    // Piecewise-linear interpolation between the two stored values that
    // bracket val.
    map<real,real>::const_iterator high = rankmap.upper_bound(val);
    map<real,real>::const_iterator low = high; --low;
    real rank = low->second + (val-low->first)*(high->second-low->second)/(high->first-low->first);
    return rank;
}
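
// Worked example (illustrative): with rankmap { 1: 0, 3: 0.5, 5: 1 },
// mapToRank(2, rankmap) interpolates between (1, 0) and (3, 0.5):
// 0 + (2 - 1) * (0.5 - 0) / (3 - 1) = 0.25. Values at or below 1 return 0,
// and values at or above 5 return 1.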

///////////////////
// computeOutput //
///////////////////
void UniformizeLearner::computeOutput(const Vec& input, Vec& output) const
{
    // First the uniformized value of each selected field (missing values
    // stay missing), then raw inputs fill the remaining output positions.
    int n= outputsize();
    output.resize(n);
    int nk= which_fieldnums.size();
    for(int k= 0; k < nk; ++k){
        real val=input[which_fieldnums[k]];
        if(is_missing(val))
            output[k] = MISSING_VALUE;
        else
            output[k] = mapToRank(val, val_to_rank[k]);
    }
    for(int k= nk; k < n; ++k)
        output[k]= input[k-nk];
}
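
// Example layout (illustrative): with input = [10, 20, 30],
// which_fieldnums = [1] and raw_inputs_as_output = true, the output is
// [mapToRank(20, val_to_rank[0]), 10, 20, 30], of size outputsize() == 4.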

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void UniformizeLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                const Vec& target, Vec& costs) const
{
    // This learner computes no costs.
    costs.resize(0);
}

TVec<string> UniformizeLearner::getTestCostNames() const
{
    static TVec<string> nocosts;
    return nocosts;
}

TVec<string> UniformizeLearner::getTrainCostNames() const
{
    static TVec<string> nocosts;
    return nocosts;
}


TVec<string> UniformizeLearner::getOutputNames() const
{
    int n = outputsize();
    TVec<string> outnames(n);
    int nk= which_fieldnums.size();

    for(int k= 0; k < nk; ++k)
        outnames[k]= string("uniformized_")+input_field_names[which_fieldnums[k]];
    for(int k= nk; k < n; ++k)
        outnames[k]= input_field_names[k-nk];

    return outnames;
}
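
// Example (illustrative): continuing the layout above, with input fields
// ["a", "b", "c"], which_fieldnums = [1] and raw_inputs_as_output = true,
// the output names are ["uniformized_b", "a", "b", "c"].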


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :