// -*- C++ -*-

// GaussianizeVMatrix.cc
//
// Copyright (C) 2006 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Olivier Delalleau

#include "GaussianizeVMatrix.h"
#include <plearn/math/pl_erf.h>
#include "VMat_computeStats.h"
#include <plearn/io/load_and_save.h>
#include <plearn/io/fileutils.h>
#include <plearn/base/RemoteDeclareMethod.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    GaussianizeVMatrix,
    "Transforms its source VMatrix so that its features look Gaussian.",

    "This VMat transforms the features of its source that are obviously non-\n"
    "Gaussian, i.e. when the difference between the maximum and minimum\n"
    "value is too large compared to the standard deviation (the meaning of\n"
    "'too large' being controlled by the 'threshold_ratio' option).\n"
    "\n"
    "When this happens, the values of a feature are sorted and their rank\n"
    "is used to transform them through the inverse cumulative of a normal\n"
    "Gaussian, resulting in a distribution that actually looks Gaussian.\n"
    "Note that, unless specified otherwise through the options, only the\n"
    "input features are transformed.\n"
    "\n"
    "It is important to note that only unique values are considered when\n"
    "computing the mapping, so that there is no 'hole' in the resulting\n"
    "distribution. This means the transformation learnt does not depend on\n"
    "the number of occurrences of a specific value, but only on the ordering\n"
    "of the unique values encountered. The 'uniqueness' is defined by the\n"
    "PLearn 'is_equal' function, used to approximately compare real numbers.\n"
    "\n"
    "An additional 'train_source' VMat can also be specified in order to\n"
    "transform new data (in the 'source' option) while the transformation\n"
    "parameters are learned on a fixed 'train_source' VMat (e.g. when new\n"
    "test data are obtained and need to be properly Gaussianized).\n"
);
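
// Illustrative sketch (not part of the PLearn build): the rank-based mapping
// described above, using the standard library only. The helpers
// 'toy_gaussianize' and 'inverse_gauss_01_cdf' are hypothetical names; in
// this file the actual work is done by StatsCollector and
// fast_gauss_01_quantile, and uniqueness uses 'is_equal' rather than '=='.
/*
#include <algorithm>
#include <cmath>
#include <vector>

// Invert the standard normal CDF by bisection on std::erf (slow but simple).
static double inverse_gauss_01_cdf(double p)
{
    double lo = -10, hi = 10;
    for (int it = 0; it < 200; it++) {
        double mid = 0.5 * (lo + hi);
        double cdf = 0.5 * (1 + std::erf(mid / std::sqrt(2.0)));
        if (cdf < p)
            lo = mid;
        else
            hi = mid;
    }
    return 0.5 * (lo + hi);
}

// Replace each value of a column by the normal quantile of its (rescaled)
// rank among the sorted unique values, as the class documentation describes.
static std::vector<double> toy_gaussianize(std::vector<double> column)
{
    std::vector<double> uniq(column);
    std::sort(uniq.begin(), uniq.end());
    uniq.erase(std::unique(uniq.begin(), uniq.end()), uniq.end());
    double n = double(uniq.size());
    if (n < 2)
        return column; // A constant column is left untouched.
    for (size_t i = 0; i < column.size(); i++) {
        double rank = double(std::lower_bound(uniq.begin(), uniq.end(),
                                              column[i]) - uniq.begin());
        double interpol = rank / (n - 1);                // rank in [0, 1]
        interpol = (n - 1) / (n + 1) * interpol + 1 / (n + 1);
        column[i] = inverse_gauss_01_cdf(interpol);
    }
    return column;
}
*/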

// GaussianizeVMatrix //
GaussianizeVMatrix::GaussianizeVMatrix():
    gaussianize_input(true),
    gaussianize_target(false),
    gaussianize_weight(false),
    gaussianize_extra(false),
    gaussianize_binary(false),
    threshold_ratio(10),
    save_and_reuse_stats(true)
{}

// declareOptions //
void GaussianizeVMatrix::declareOptions(OptionList& ol)
{
    declareOption(ol, "threshold_ratio", &GaussianizeVMatrix::threshold_ratio,
                                         OptionBase::buildoption,
        "A source's feature will be Gaussianized when the following holds:\n"
        "(max - min) / stddev > threshold_ratio.");
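
    // Worked example for the criterion above (a sketch, not normative): a
    // feature spread uniformly over [0, 1] has a stddev of about 0.29, so
    // (max - min) / stddev is about 3.5 and stays below the default
    // threshold_ratio of 10, while a heavy-tailed feature ranging from 0 to
    // 1e6 with a stddev of 1e4 gives a ratio of 100 and would be
    // Gaussianized.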

    declareOption(ol, "gaussianize_input",
                  &GaussianizeVMatrix::gaussianize_input,
                  OptionBase::buildoption,
        "Whether or not to Gaussianize the input part.");

    declareOption(ol, "gaussianize_target",
                  &GaussianizeVMatrix::gaussianize_target,
                  OptionBase::buildoption,
        "Whether or not to Gaussianize the target part.");

    declareOption(ol, "gaussianize_weight",
                  &GaussianizeVMatrix::gaussianize_weight,
                  OptionBase::buildoption,
        "Whether or not to Gaussianize the weight part.");

    declareOption(ol, "gaussianize_extra",
                  &GaussianizeVMatrix::gaussianize_extra,
                  OptionBase::buildoption,
        "Whether or not to Gaussianize the extra part.");

    declareOption(ol, "save_and_reuse_stats",
                  &GaussianizeVMatrix::save_and_reuse_stats,
                  OptionBase::buildoption,
        "If true, will save and reuse the stats of the source.");

    declareOption(ol, "gaussianize_binary",
                  &GaussianizeVMatrix::gaussianize_binary,
                  OptionBase::buildoption,
        "Whether or not to Gaussianize binary variables.");

    declareOption(ol, "train_source", &GaussianizeVMatrix::train_source,
                                      OptionBase::buildoption,
        "An optional VMat that will be used instead of 'source' to compute\n"
        "the transformation parameters from the distribution statistics.");

    declareOption(ol, "fields_to_gaussianize",
                  &GaussianizeVMatrix::fields_to_gaussianize,
                  OptionBase::buildoption,
                  "The fields that we want to be Gaussianized.");

    declareOption(ol, "stats_file_to_use",
                  &GaussianizeVMatrix::stats_file_to_use,
                  OptionBase::buildoption,
                  "The filename of the statistics to use instead of the"
                  " train_source.");

    declareOption(ol, "save_fields_gaussianized",
                  &GaussianizeVMatrix::save_fields_gaussianized,
                  OptionBase::buildoption,
                  "A path where we will save the fields selected to be Gaussianized.");

    declareOption(ol, "features_to_gaussianize",
                  &GaussianizeVMatrix::features_to_gaussianize,
                  OptionBase::learntoption,
                  "The columns that will be Gaussianized.");

    declareOption(ol, "values",
                  &GaussianizeVMatrix::values,
                  OptionBase::learntoption|OptionBase::nosave,
                  "The values used to Gaussianize.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

// build //
void GaussianizeVMatrix::build()
{
    inherited::build();
    build_();
}

// build_ //
void GaussianizeVMatrix::build_()
{
    if (!source)
        return;

    if (train_source) {
        source->compatibleSizeError(train_source,
                                    "In GaussianizeVMatrix::build_ -"
                                    " The source and the train_source"
                                    " option are not compatible.");
    }

    VMat the_source = train_source ? train_source : source;

    PLCHECK( the_source->inputsize() >= 0 && the_source->targetsize() >= 0 &&
            the_source->weightsize() >= 0 && the_source->extrasize() >= 0 );

    // Set the mtime to avoid the warning about an mtime of 0.
    if(train_source)
        updateMtime(train_source);
    updateMtime(source);

    // Find which dimensions to Gaussianize.
    features_to_gaussianize.resize(0);
    int col = 0;
    if (gaussianize_input)
        features_to_gaussianize.append(
                TVec<int>(col, col + the_source->inputsize() - 1, 1));
    col += the_source->inputsize();
    if (gaussianize_target)
        features_to_gaussianize.append(
                TVec<int>(col, col + the_source->targetsize() - 1, 1));
    col += the_source->targetsize();
    if (gaussianize_weight)
        features_to_gaussianize.append(
                TVec<int>(col, col + the_source->weightsize() - 1, 1));
    col += the_source->weightsize();
    if (gaussianize_extra)
        features_to_gaussianize.append(
                TVec<int>(col, col + the_source->extrasize() - 1, 1));
    col += the_source->extrasize();

    // Obtain meta information from source.
    setMetaInfoFromSource();

    if ((the_source->hasMetaDataDir() || hasMetaDataDir() ||
         !stats_file_to_use.empty() || !save_and_reuse_stats)
        && values.size() == 0)
        setMetaDataDir(getMetaDataDir());
}

// append_col_to_gaussianize //
void GaussianizeVMatrix::append_col_to_gaussianize(int col, StatsCollector stat){
    values.append(Vec());
    Vec& values_j = values.lastElement();
    features_to_gaussianize.append(col);
    map<real, StatsCollectorCounts>::const_iterator it, it_dummy;
    // Note that we obtain the approximate counts, so that almost equal
    // values have been merged together already.
    map<real,StatsCollectorCounts>* count_map =
        stat.getApproximateCounts();
    values_j.resize(0,count_map->size());
    // We use a dummy iterator to get rid of the last element in the
    // map, which is the max real value.
    it_dummy = count_map->begin();
    it_dummy++;
    for (it = count_map->begin(); it_dummy != count_map->end();
         it++, it_dummy++)
    {
        values_j.append(it->first);
    }
}
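
// Illustrative sketch (not part of the build): the loop above keeps every key
// of the count map except the last one, which the comment above describes as
// a sentinel holding the maximum real value. With the standard library alone
// the same idea looks roughly like this ('keys_without_sentinel' is a
// hypothetical helper, not a PLearn function).
/*
#include <limits>
#include <map>
#include <vector>

static std::vector<double> keys_without_sentinel(
        const std::map<double, int>& counts)
{
    std::vector<double> keys;
    for (std::map<double, int>::const_iterator it = counts.begin();
         it != counts.end(); ++it)
        keys.push_back(it->first);
    if (!keys.empty() && keys.back() == std::numeric_limits<double>::max())
        keys.pop_back(); // Drop the sentinel entry, as the loop above does.
    return keys;
}
*/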

// setMetaDataDir //
void GaussianizeVMatrix::setMetaDataDir(const PPath& the_metadatadir){

    if(!the_metadatadir.empty())
        inherited::setMetaDataDir(the_metadatadir);

    if(features_to_gaussianize.size()==0)
        return;

    VMat the_source = train_source ? train_source : source;

    if((!the_source->hasMetaDataDir() && stats_file_to_use.empty()) && save_and_reuse_stats)
        PLERROR("In GaussianizeVMatrix::setMetaDataDir() - the train_source,"
                " source or this VMatrix should have a metadata directory,"
                " or save_and_reuse_stats must be false.");

    // To save the stats there must be a metadatadir.
    if(!the_source->hasMetaDataDir() && hasMetaDataDir()){
        if (train_source)
            the_source->setMetaDataDir(getMetaDataDir()+"train_source");
        else
            the_source->setMetaDataDir(getMetaDataDir()+"source");
    }

    TVec<StatsCollector> stats;
    if(!stats_file_to_use.empty()){
        if(!isfile(stats_file_to_use))
            PLERROR("In GaussianizeVMatrix::setMetaDataDir() - "
                    "stats_file_to_use = '%s' is not a file.",
                    stats_file_to_use.c_str());
        PLearn::load(stats_file_to_use, stats);
    } else if(save_and_reuse_stats)
        stats = the_source->
            getPrecomputedStatsFromFile("stats_gaussianizeVMatrix.psave", -1, true);
    else
        stats = PLearn::computeStats(the_source, -1, true);

    if(fields_to_gaussianize.size()>0){
        if(fields_to_gaussianize.size()>width())
            PLERROR("In GaussianizeVMatrix::setMetaDataDir() - "
                    "More fields in fields_to_gaussianize than the width()");
        for(int i=0;i<fields_to_gaussianize.size();i++){
            int field=fields_to_gaussianize[i];
            if(field>=width() || field<0)
                PLERROR("In GaussianizeVMatrix::setMetaDataDir() - "
                        "bad field number (%d) in fields_to_gaussianize!",
                        field);
        }
        features_to_gaussianize.resize(0,fields_to_gaussianize.length());

        values.resize(0);
        int last_j=-1;
        for (int i = 0; i < fields_to_gaussianize.length(); i++) {
            int j = fields_to_gaussianize[i];
            StatsCollector& stat = stats[j];
            if(last_j+1!=j)
                for(int k=last_j+1;k<j;k++){
                    // Forget the stats of skipped columns to keep the total
                    // memory used lower.
                    stats[k].forget();
                }
            append_col_to_gaussianize(j,stat);
            stats[j].forget(); // to keep the total memory used lower.
            last_j = j;        // remember the last column processed.
        }
    } else {

        // See which dimensions violate the Gaussian assumption and will be
        // actually Gaussianized, and store the corresponding list of values.
        TVec<int> candidates = features_to_gaussianize.copy();
        features_to_gaussianize.resize(0);
        values.resize(0);
        for (int i = 0; i < candidates.length(); i++) {
            int j = candidates[i];
            StatsCollector& stat = stats[j];
            if (fast_exact_is_equal(stat.stddev(), 0)) {
                // We don't Gaussianize constant features.
            } else if (!gaussianize_binary && stat.isbinary()) {
                // We don't Gaussianize binary features.
            } else if ((stat.max() - stat.min()) > threshold_ratio * stat.stddev()) {
                append_col_to_gaussianize(j,stat);
            }

            stats[j].forget(); // to keep the total memory used lower.
        }
    }

    fields_gaussianized.resize(width());
    fields_gaussianized.fill(-1);
    for(int i=0;i<features_to_gaussianize.size();i++)
        fields_gaussianized[features_to_gaussianize[i]]=i;
    if(!save_fields_gaussianized.empty()){
        PLearn::save(save_fields_gaussianized,features_to_gaussianize);
    }
    if(features_to_gaussianize.size()==0)
        PLWARNING("In GaussianizeVMatrix::setMetaDataDir() - no variable was Gaussianized");
}

// getNewRow //
void GaussianizeVMatrix::getNewRow(int i, const Vec& v) const
{
    if(values.size()==0 && features_to_gaussianize.size()>0)
        PLERROR("In GaussianizeVMatrix::getNewRow() - This VMatrix has not"
                " been built correctly. Try setting a metadatadir or setting"
                " save_and_reuse_stats=0.");
    PLASSERT( source );
    source->getRow(i, v);
    for (int k = 0; k < features_to_gaussianize.length(); k++) {
        int j = features_to_gaussianize[k];
        real current_val = v[j];
        if (is_missing(current_val))
            continue;
        // Find closest values in the training data.
        Vec& values_j = values[k];
        real interpol;
        if (current_val < values_j[0]) {
            // Smaller than the minimum.
            interpol = 0;
        } else if (current_val > values_j.lastElement()) {
            // Higher than the maximum.
            interpol = 1;
        } else {
            int min = 0;
            int max = values_j.length() - 1;
            while (max - min > 1) {
                int mid = (max + min) / 2;
                real mid_val = values_j[mid];
                if (current_val < mid_val)
                    max = mid;
                else if (current_val > mid_val)
                    min = mid;
                else {
                    // Found the exact value.
                    min = max = mid;
                }
            }
            if (min == max)
                interpol = min;
            else {
                PLASSERT( max - min == 1 );
                interpol = (current_val - values_j[min]) /
                          (values_j[max] - values_j[min]) + min;
                PLASSERT( !is_missing(interpol) );
            }
        }
        interpol /= (values_j.length() - 1);
        PLASSERT( interpol >= 0 && interpol <= 1 );
        // The expectations of the minimum and maximum of n numbers drawn from
        // a uniform(0,1) distribution are respectively 1/(n+1) and n/(n+1):
        // we shift and rescale 'interpol' to lie in [1/(n+1), n/(n+1)] before
        // applying the inverse of the Gaussian cumulative function.
        real n = values_j.length();
        interpol = (n - 1) / (n + 1) * interpol + 1 / (n + 1);
        v[j] = fast_gauss_01_quantile(interpol);
    }
}
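
// Worked check of the shift-and-rescale step in getNewRow above (a sketch
// following the same formula): with n = 4 unique training values,
// interpol = 0 becomes 1/(n+1) = 0.2 and interpol = 1 becomes n/(n+1) = 0.8,
// so the smallest and largest values map to roughly -0.84 and +0.84 under the
// inverse Gaussian cumulative instead of minus and plus infinity.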

// makeDeepCopyFromShallowCopy //
void GaussianizeVMatrix::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(train_source, copies);
    //features_to_gaussianize?
    //scaling_factor?
    //values?
}


// unGauss //
real GaussianizeVMatrix::unGauss(real input, int j) const
{
    int k = fields_gaussianized[j];
    if (k < 0)
        return input; // This column was not Gaussianized.

    real interpol = gauss_01_cum(input);
    Vec& values_j = values[k];
    int idx = int(interpol * values_j.length());
    if (idx >= values_j.length()) // Guard against interpol being exactly 1.
        idx = values_j.length() - 1;
    return values_j[idx];
}

// unGauss //
void GaussianizeVMatrix::unGauss(Vec& inputs, Vec& ret, int j) const
{
    int k = fields_gaussianized[j];
    if (k < 0) {
        // This column was not Gaussianized: copy the inputs unchanged.
        ret << inputs;
        return;
    }

    Vec& values_j = values[k];
    for (int i = 0; i < inputs.size(); i++) {
        real value = inputs[i];
        real interpol = gauss_01_cum(value);
        int idx = int(interpol * values_j.length());
        if (idx >= values_j.length()) // Guard against interpol being exactly 1.
            idx = values_j.length() - 1;
        ret[i] = values_j[idx];
    }
}

real GaussianizeVMatrix::remote_unGauss(real value, int col) const
{
    return unGauss(value,col);
}

Vec GaussianizeVMatrix::remote_unGauss_vec(Vec values, int col) const
{
    Vec outputs(values.length());
    unGauss(values,outputs,col);
    return outputs;
}

// declareMethods //
void GaussianizeVMatrix::declareMethods(RemoteMethodMap& rmm)
{
    // Insert a backpointer to remote methods; note that this is different from
    // declareOptions().
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(
        rmm, "unGauss", &GaussianizeVMatrix::remote_unGauss,
        (BodyDoc("Revert the Gaussianization that was done."),
         ArgDoc ("value", "The value to revert."),
         ArgDoc ("j", "The column of the value.")));


    declareMethod(
        rmm, "unGauss2", &GaussianizeVMatrix::remote_unGauss_vec,
        (BodyDoc("Revert the Gaussianization that was done."),
         ArgDoc ("values", "A vector of values to revert."),
         ArgDoc ("j", "The column of the values.")));

}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :