// -*- C++ -*-

// Copyright (C) 2004 Université de Montréal
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: FieldConvertCommand.cc 9192 2008-07-02 16:48:44Z nouiz $
 ******************************************************* */

#include "FieldConvertCommand.h"
#include <plearn/base/stringutils.h>
#include <plearn/db/getDataSet.h>
#include <plearn/io/openFile.h>
#include <plearn/io/fileutils.h>
#include <plearn/math/pl_erf.h>
#include <plearn/math/random.h>
#include <plearn/vmat/SelectRowsVMatrix.h>
#include <plearn/vmat/VMat.h>

#define NORMALIZE 1
#define MISSING_BIT 2
#define ONEHOT 4
#define SKIP 16
#define UNIFORMIZE 32
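
// Per-field actions are accumulated as a bitmask of the flags above, e.g. a
// continuous field with missing values ends up with (NORMALIZE | MISSING_BIT).
// Note that the bit value 8 is not used by any flag.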

using namespace PLearn;

PLearnCommandRegistry FieldConvertCommand::reg_(new FieldConvertCommand);


/////////////////////////
// FieldConvertCommand //
/////////////////////////
FieldConvertCommand::FieldConvertCommand()
    :  PLearnCommand("FieldConvert",

                     "Reads a dataset and generates a .vmat file based on the data, but optimized for training.\n",

                     "The nature of each field of the original dataset is automatically detected, and determines the appropriate treatment.\n"
                     "The possible field types with the corresponding treatment can be one of:\n"
                     "continuous      - quantitative data (data is real): the field is replaced by the normalized data (minus mean, divided by stddev)\n"
                     "binary          - binary discrete data (is processed as a continuous field)\n"
                     "discrete_uncorr - discrete integers (qualitative data, e.g.: postal codes, categories) not correlated with the target: the field is replaced by a group of fields in a one-hot fashion.\n"
                     "discrete_corr   - discrete integers, correlated with the target: both the normalized and the one-hot versions of the field are used in the new dataset\n"
                     "constant        - constant data: the field is skipped (it is not present in the new dataset)\n"
                     "skip            - irrelevant data: the field is skipped (it is not present in the new dataset)\n"
                     "\n"
                     "When there are ambiguities, messages are displayed for the problematic field(s) and they are skipped. The user must use a 'force' file\n"
                     "to explicitly force the types of the ambiguous field(s). The file is made of lines in the following possible formats:\n"
                     "FIELDNAME=type\n"
                     "fieldNumberA-fieldNumberB=type   [e.g.: 200-204=constant, to force a range]\n"
                     "FIELDNAME+=\"processing\" (n_inputs) [to add a home-made processing after a field; the number of inputs thus added must be given]\n"
                     "\n"
                     "Note that for all types but skip, if the field contains missing values, an additional 'missing-bit' field is added and is '1' only for missing values.\n"
                     "The difference between types constant and skip is only cosmetic: constant means the field is constant, while skip means either there are too many missing values or it has been forced to skip.\n"
                     "A report file is generated and contains the information about the processing for each field.\n"
                     "The target field index of the source needs to be specified (i.e., to perform the correlation test). It can be any field of the "
                     "source dataset, but will be the last field of the new dataset. *** We assume the target is never missing. ***\n\n"
                     "usage: FieldConvert\n"
                     "        *source                = [source dataset]\n"
                     "        *destination           = [new dataset with vmat extension]\n"
                     "        *target                = [field index of target]\n"
                     "         force                 = [force file]\n"
                     "         report                = [report file] (default = 'FieldConvertReport.txt')\n"
                     "         min_fraction          = [if the number of unique values is > 'min_fraction' * the number of non-missing values -> the field is continuous]\n"
                     "                                 (default = 0.3)\n"
                     "         max_pvalue            = [maximum p-value to assume correlation with the target] (default = 0.025)\n"
                     "         frac_missing_to_skip  = [if MISSING >= 'frac_missing_to_skip' * number of samples, then this field is skipped]\n"
                     "                                 (default = 1.0)\n"
                     "         frac_enough           = [if a field is discrete, only values represented by at least frac_enough * nSamples\n"
                     "                                 elements will be kept] (default = 0.005)\n"
                     "         precompute            = [none | pmat | ... : possibly add a <PRECOMPUTE> tag in the destination] (default = none)\n"
                     "         discrete_tolerance    = [if a discrete field has float values, its one-hot mapping will be enlarged according to\n"
                     "                                 this factor] (default = 0.001)\n"
                     "         uniformize            = [0 | 1 | 2: whether fields should be uniformized, 2 meaning all fields and 1 meaning only\n"
                     "                                 fields obviously not following a normal distribution] (default = 0)\n"
                     "         frac_missing_sample   = [if a sample has more than 'frac_missing_sample' * n_fields missing fields, then this sample\n"
                     "                                 will be removed from the dataset (before analyzing the dataset's fields)] (default = 1)\n"
                     "\n"
                     "where fields marked with an asterisk * are mandatory\n"
        )
{}
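
// For illustration only -- a hypothetical invocation (dataset, field names and
// indices below are made up):
//
//   plearn FieldConvert source=data.amat destination=data.vmat target=42 \
//          force=force.txt
//
// where force.txt could contain lines such as:
//
//   age=continuous
//   postal_code=discrete_uncorr
//   100-120=skip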

/////////
// run //
/////////
void FieldConvertCommand::run(const vector<string> & args)
{
    // set default values
    UNIQUE_NMISSING_FRACTION_TO_ASSUME_CONTINUOUS = 0.3;
    PVALUE_THRESHOLD = 0.025;
    FRAC_MISSING_TO_SKIP = 1.0;
    FRAC_ENOUGH = 0.005;
    DISCRETE_TOLERANCE = 1e-3;
    real FRAC_MISSING_SAMPLE = 1;
    target = -1;
    report_fn="FieldConvertReport.txt";
    precompute = "none";
    int uniformize = 0;

    for(unsigned int i=0;i<args.size();i++)
    {
        vector<string> val = split(args[i],"=");
        if(val.size()<2)
            PLERROR("bad argument: %s ",args[i].c_str());
        if(val[0]=="source")
            source_fn=val[1];
        else if(val[0]=="destination")
            desti_fn=val[1];
        else if(val[0]=="target")
            target=toint(val[1]);
        else if(val[0]=="force")
            force_fn=val[1];
        else if(val[0]=="report")
            report_fn=val[1];
        else if(val[0]=="min_fraction")
            UNIQUE_NMISSING_FRACTION_TO_ASSUME_CONTINUOUS=toreal(val[1]);
        else if(val[0]=="max_pvalue")
            PVALUE_THRESHOLD=toreal(val[1]);
        else if(val[0]=="frac_missing_to_skip")
            FRAC_MISSING_TO_SKIP=toreal(val[1]);
        else if(val[0]=="discrete_tolerance")
            DISCRETE_TOLERANCE = toreal(val[1]);
        else if(val[0]=="uniformize")
            uniformize = toint(val[1]);
        else if(val[0]=="frac_enough")
            FRAC_ENOUGH=toreal(val[1]);
        else if(val[0]=="precompute")
            precompute = val[1];
        else if (val[0] == "frac_missing_sample")
            FRAC_MISSING_SAMPLE = toreal(val[1]);
        else PLERROR("unknown argument: %s ",val[0].c_str());
    }
    if(source_fn=="")
        PLERROR("you must specify the source file");
    if(desti_fn=="")
        PLERROR("you must specify the destination .vmat");
    if(target==-1)
        PLERROR("you must specify the target field index of the source");

    // Manual map between field index and types.
    map<int, FieldType> force;
    map<int, string> additional_proc;
    map<int, int> additional_proc_size;

    real beta_hat, student = -1;
    real correlation = -1;

    // Get the dataset.
    VMat vm_orig = getDataSet(source_fn);
    VMat vm;
    int n_removed = 0;
    TVec<int> to_keep;
    if (FRAC_MISSING_SAMPLE < 1) {
        // We may have to remove some samples first.
        ProgressBar pb("Removing samples with too many missing values", vm_orig->length());
        int w = vm_orig->width();
        Vec row(w);
        int count;
        int max_count = int(w * FRAC_MISSING_SAMPLE);
        for (int i = 0; i < vm_orig->length(); i++) {
            vm_orig->getRow(i, row);
            count = 0;
            for (int j = 0; j < w; j++) {
                if (is_missing(row[j]))
                    count++;
            }
            if (count <= max_count)
                to_keep.append(i);
            else
                n_removed++;
            pb.update(i+1);
        }
        pb.close();
        cout << "Removed " << n_removed << " samples that were missing more than " << max_count << " fields." << endl;
        if (n_removed > 0) {
            vm = new SelectRowsVMatrix(vm_orig, to_keep);
            vm->setMetaDataDir(newFilename("/tmp", "select_rows_temp_dir", true));
        }
        else
            vm = vm_orig;
    } else {
        vm = vm_orig;
    }
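    // For instance, with frac_missing_sample = 0.5 on a 100-field dataset, the
    // filtering above drops any sample missing more than 50 of its fields.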

    // A vector where we store the indices of the fields to be uniformized.
    TVec<int> need_to_be_uniformized;

    if (target < 0 || target >= vm->width()) {
        PLERROR("The target column you specified is not valid");
    }

    // Compute the result inputsize as the preprocessing goes on.
    int inputsize = 0;

    cout<<"### using field "<<target<<" as target"<<endl;

    // read user custom operations from the file 'force_fn'
    vector<string> forcelines;
    if(force_fn!="")
        forcelines = getNonBlankLines(loadFileAsString(force_fn));
    for(int i=0; i<(signed)forcelines.size();i++)
    {
        size_t pos_of_equal = forcelines[i].find('=');
        if (pos_of_equal == string::npos)
            PLERROR("In FieldConvertCommand - A line in the force file does not contain the '=' character");
        vector<string> vec(2);
        vec[0] = forcelines[i].substr(0, pos_of_equal);
        vec[1] = forcelines[i].substr(pos_of_equal + 1);
/*    cout << "vec[0] = " << vec[0] << endl;
      cout << "vec[1] = " << vec[1] << endl; */
        vector<string> leftpart = split(vec[0],"-");
        if (leftpart.size() == 1 && leftpart[0].substr(leftpart[0].size() - 1) == "+") {
            // Syntax: field+="processing" (number of inputs added)
            int field_index = vm->getFieldIndex(leftpart[0].substr(0, leftpart[0].size() - 1));
            if (field_index == -1)
                PLERROR("In FieldConvertCommand - A field was not found in the source VMatrix");
            if (additional_proc[field_index] != "")
                PLERROR("In FieldConvertCommand - There can be only one additional processing specified for each field");
            size_t last_open_par = vec[1].rfind('(');
            if (last_open_par == string::npos)
                PLERROR("In FieldConvertCommand - You must specify the number of inputs added in a processing");
            string added_inputs = vec[1].substr(last_open_par + 1, vec[1].rfind(')') - last_open_par - 1);
            // cout << "added_inputs = " << added_inputs << endl;
            additional_proc_size[field_index] = toint(added_inputs);
            size_t first_quote = vec[1].find('"');
            size_t last_quote = vec[1].rfind('"', last_open_par);
            additional_proc[field_index] = vec[1].substr(first_quote + 1, last_quote - first_quote - 1);
            // cout << "Processing added: " << additional_proc[field_index] << endl;
        } else {
            FieldType rpart = stringToFieldType(vec[1]);

            if(leftpart.size()>1)
            {
                // we have a range
                int a = vm->getFieldIndex(leftpart[0]);
                int b = vm->getFieldIndex(leftpart[1]);
                for(int j=a;j<=b;j++) {
                    if (force.find(j) != force.end())
                        PLERROR("In FieldConvertCommand::run - Duplicate force type for variable %d", j);
                    force[j]=rpart;
                }
            }
            else
            {
                int index = vm->getFieldIndex(vec[0]);
                if (index == -1) {
                    // Ignore unknown fields instead of recording a bogus index.
                    cout<<"field '"<<vec[0]<<"' does not exist in the matrix; line ignored"<<endl;
                    continue;
                }
                if (force.find(index) != force.end())
                    PLERROR("In FieldConvertCommand::run - Duplicate force type for variable %d", index);
                force[index] = rpart;
            }
        }
    }

    TVec<StatsCollector> sc;
    sc = vm->getStats();

    PStream out;
    PStream out_uni;
    PPath filename_non_uni = desti_fn + ".non_uniformized.vmat";
    if (uniformize > 0) {
        // We write two files: the one with the preprocessing and another one with
        // the uniformization.
        out = openFile(filename_non_uni, PStream::raw_ascii, "w");
        out_uni = openFile(desti_fn, PStream::raw_ascii, "w");
    } else {
        out = openFile(desti_fn, PStream::raw_ascii, "w");
    }
    PStream report = openFile(report_fn, PStream::raw_ascii, "w");
    out<<"<SOURCES>\n";
    if (n_removed == 0) {
        out << source_fn;
    } else {
        out << "@" << endl
            << "SelectRowsVMatrix(" << endl
            << "  source = AutoVMatrix(specification = \"" << source_fn << "\")" << endl
            << "  indices = [ " << to_keep << " ]" << endl
            << ")";
    }
    out << "\n</SOURCES>\n<PROCESSING>\n";
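
    // The generated .vmat is assembled incrementally below; its overall layout is:
    //   <SOURCES>     the source dataset (wrapped in a SelectRowsVMatrix if
    //                 samples were removed above)
    //   <PROCESSING>  one VPL snippet per kept field, with the target appended last
    //   <SIZES>       inputsize, targetsize (1), weightsize (0)
    //   <PRECOMPUTE>  (optional, when the 'precompute' option is not 'none')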

    // Minimum number of representatives of a class for it to be considered significant.
    int n_enough = (int) (FRAC_ENOUGH * vm->length());

    PP<ProgressBar> pb = new ProgressBar("Analyzing fields", vm->width());

    // Process each field.
    for(int i=0;i<vm->width();i++)
    {
        type=unknown; // At the beginning we don't know the type.
        beta_hat=0;
        string message;
        int action = 0;
        int count = (int)sc[i].getCounts()->size()-1; // Number of unique values.

        // Is this field's type forced?
        if(force.find(i) != force.end())
            type = force[i];
        else if(i==target)
            // add target ONLY at the end of the process
            // (so it's the last column of the dataset)
            type=skip;

        // Test for fields to be skipped, when not enough data is available.
        if(sc[i].nnonmissing() <= (1-FRAC_MISSING_TO_SKIP) * vm->length()) {
            if (type != unknown && type != skip && type != constant) {
                // We forced the type to something that should not be skipped.
                cout << "Warning: you forced the type of field number " << i << ", "
                     << "but there are too many missing values so it'll be skipped. "
                     << "If you want to keep it, you'll have to add it by hand to the resulting .vmat"
                     << endl;
            }
            type=skip;
        }

        // Test whether there are only 2 unique values: in this case, we don't
        // need a one-hot, and we set it to binary (which will be processed the
        // same as continuous).
        if (count == 2 && type != skip) {
            Vec counts(2);
            int k = 0;
            for(map<real,StatsCollectorCounts>::iterator it = sc[i].getCounts()->begin(); k <= 1; ++it) {
                counts[k++] = it->second.n;
            }
            if (counts[0] >= n_enough && counts[1] >= n_enough) {
                if (type != unknown && type != binary) {
                    cout << "Warning: type for field number " << i << " set to binary, "
                         << "but you had forced it to something else." << endl;
                }
                type = binary;
            } else {
                // Not enough representatives for one of the classes.
                if (type != unknown && type != skip) {
                    cout << "Warning: field number " << i << " is binary but doesn't have "
                         << "enough representatives of each class, thus it'll be skipped, "
                         << "even if you had forced it to some other type (edit the resulting "
                         << ".vmat if you really want to add it)." << endl;
                }
                type = skip;
                // cout << "Skipped binary field " << i << " (counts_0 = "
                //     << counts[0] << ", counts_1 = " << counts[1] << ")" << endl;
            }
        }

        // Test for constant values.
        if(count<=1 && type != skip && type != constant) {
            if(sc[i].nmissing()>0 && sc[i].nmissing()<vm->length()) {
                // This case actually never occurs in the Bell database.
                // That's why we leave it to the user.
                message = "Constant field, but there are " + tostring(sc[i].nmissing()) +
                    " missing values. Force the type, or modify this program!";
            }
            else {
                // Either there is no missing value, or they are all missing.
                if (type != unknown) {
                    cout << "Warning: field number " << i << " has been forced, but "
                         << "appears to be constant. Edit the resulting .vmat if you "
                         << "really want to add it." << endl;
                }
                type=constant;
            }
        }

        // Test if there exist fractional parts.
        // This test has two goals:
        //  - if we don't know the type, a fractional part indicates continuous data
        //  - if the type is discrete, we need to be careful in the one-hot ranges
        //    because taking exact float values is not a good idea
        bool may_be_fraction = false;
        if (type == continuous || type == binary) {
            may_be_fraction = true;
        } else if (type != skip && type != constant) {
            int k = 0;
            for (map<real,StatsCollectorCounts>::iterator it = sc[i].getCounts()->begin(); k < count; ++it) {
                real val = it->first;
                k++;
                if(!fast_exact_is_equal(val-(int)val, 0))
                {
                    may_be_fraction = true;
                    break;
                }
            }
        }

        // Did we find the type already?
        if (type == unknown && message == "")
        {

            if(sc[i].max()>-1000 && vm->getStringToRealMapping(i).size()>0)
                message="Field uses both string map & numerical values";
            else if(sc[i].min() >= 0 && sc[i].max() >= 12000 && sc[i].max() <= 20000) {
                // Could be a numeric SAS date.
                // We first make sure they are all integer values.
                bool non_integer = false;
                for(int j=0;j<vm->length();j++)
                {
                    real val = vm->get(j,i);
                    if(!is_missing(val) && ((val-(int)val) > 0))
                        non_integer = true;
                }
                if (!non_integer) {
                    message = "Looks like a numeric SAS date. If this is the case, first edit the source (.vmat) file to change the 'TextFilesVMatrix' field type (use sas_date), then edit force.txt to force the type to continuous. If it's not a date, please use force.txt to force the type.";
                }
            }
            else if(sc[i].min()>19700000 && sc[i].max()<20080000)
                // Could be a date between 1970 and 2008.
                message="Looks like a date. Edit the source file to change the 'TextFilesVMatrix' field type (use jdate). Otherwise, edit force.txt to force the type.";

            // Test whether there are enough unique values to assume continuous data (having a string map implies discrete data)
            else if((count >= MIN( UNIQUE_NMISSING_FRACTION_TO_ASSUME_CONTINUOUS * sc[i].nnonmissing(), 2000))
                    && vm->getStringToRealMapping(i).size()==0)
                type=continuous;
            else {
                // if there are fractional parts, assume continuous
                if (may_be_fraction) {
                    type=continuous;
                }
            }
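            // For example, with the default min_fraction of 0.3 and a field
            // having 1000 non-missing values, the test above requires at least
            // 300 distinct values (and no string mapping) to declare the field
            // continuous.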

            // if the data doesn't look continuous (small number of unique
            // values and no fractional parts), 'type' still equals unknown.
            if(type==unknown && message=="")
            {
                // perform correlation test
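                //
                // This is the standard significance test on the slope of the
                // simple linear regression of the target y on the field x:
                //   beta_hat    = sum((x-xbar)*(y-ybar)) / sum((x-xbar)^2)
                //   sigma_hat^2 = sum(residual^2) / (n-2)
                //   t           = beta_hat / sqrt(sigma_hat^2 / sum((x-xbar)^2))
                // The two-tailed p-value 2*P(T_{n-2} <= -|t|) is computed below
                // with student_t_cdf; a p-value under max_pvalue flags the
                // field as correlated with the target.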
                real sigma_hat=0,sigma_beta_hat=0;
                real xmean = sc[i].mean();
                real ymean = sc[target].mean();
                real x_minus_xmean_square=0;
                real y_minus_ymean_square=0;

                int len_nm = 0;
                int len = vm->length();

                Vec x(len);
                Vec y(len);
                vm->getColumn(i, x);
                vm->getColumn(target, y);

                // compute beta-hat
                for(int j=0;j<len;j++)
                    if(!is_missing(x[j]) && !is_missing(y[j]))
                    {
                        real xdiff = x[j] - xmean;
                        real ydiff = y[j] - ymean;
                        beta_hat += xdiff * ydiff;
                        x_minus_xmean_square += xdiff * xdiff;
                        y_minus_ymean_square += ydiff * ydiff;
                        len_nm++;
                    }

                // Correlation^2 = sum_xy^2 / (sum_xx * sum_yy).
                correlation = fabs(beta_hat) / sqrt(x_minus_xmean_square * y_minus_ymean_square);

                beta_hat /= x_minus_xmean_square;

                // compute sigma-hat
                for(int j=0;j<len;j++)
                    if(!is_missing(x[j]) && !is_missing(y[j]))
                        sigma_hat += square(y[j]-ymean - beta_hat*(x[j]-xmean));
                sigma_hat /= len_nm-2;

                sigma_beta_hat = sigma_hat / x_minus_xmean_square;

                real t = beta_hat / sqrt(sigma_beta_hat);

                student = 2 * student_t_cdf(-fabs(t), len_nm-2);
                if(student < PVALUE_THRESHOLD)
                {
                    // then assume data is discrete but correlated
                    type = discrete_corr;
                    // cout<<"##"<<i<<": nonmiss:"<<sc[i].nnonmissing()<<" b:"<<beta_hat<<" sigma_beta_hat:"<<sigma_beta_hat<<" T:"<<student<<endl;
                }
            }

            // If we're still not sure (that is to say, type==unknown && message=="").
            if(type==unknown && message==""){
                // Is the data 'uncorrelated + discrete + sparse'? If so, treat it as continuous.
                if((real)(sc[i].max()-sc[i].min()+1) > (real)(count)*2 ) {
                    type=continuous;
                    // cout << "Uncorrelated + discrete + sparse: " << i << " (max = " << sc[i].max() << ", min = " << sc[i].min() << ", count = " << count << ")" << endl;
                }
                else if(!fast_exact_is_equal(real(sc[i].max()-sc[i].min()+1), real(count)) )
                    message = "(edit force.txt): Data is made of a semi-sparse (density<50%) distribution of integers (uncorrelated with target). max: "+tostring(sc[i].max())+" min:"+tostring(sc[i].min())+" count:"+tostring(count);
                else {
                    // data is discrete, not sparse, and not correlated with the target,
                    // so simply encode it as one-hot
                    type = discrete_uncorr;
                    // cout << "Discrete uncorrelated: " << i << endl;
                }
            }
        }

        // Now find out which actions to perform according to type.

        // We treat 'binary' as 'continuous'.
        if (type == binary)
            type = continuous;

        if(type==unknown)
            cout<<tostring(i)+" ("+vm->fieldName(i)+") "<<message<<endl;
        else if(type==continuous)
        {
            action |= NORMALIZE;
            if(sc[i].nmissing()>0)
                action |= MISSING_BIT;
        }
        else if(type==discrete_uncorr)
        {
            action = ONEHOT;
            if(sc[i].nmissing()>0)
                action |= MISSING_BIT;
        }
        else if(type==skip || type==constant)
        {
            action = SKIP;
        }
        else if(type==discrete_corr)
        {
            action |= NORMALIZE;
            action |= ONEHOT;
            if(sc[i].nmissing()>0)
                action |= MISSING_BIT;
        }

        // Perform actions.

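        // The branches below emit VPL (PLearn's postfix processing language)
        // snippets into the <PROCESSING> section. As a sketch, for a
        // hypothetical continuous field 'age' with mean 39.5, stddev 12.3 and
        // missing values imputed with the mean, the NORMALIZE branch emits:
        //   @age isnan 39.5 @age ifelse 39.5 - 12.3 / :age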
        if(action&NORMALIZE)
        {

            out << "@" << vm->fieldName(i) << " ";
            // Replace NaNs by either the most frequent value or the mean.
            if(sc[i].nmissing()>0)
            {
                // find out the 'mode' of the distribution, if any
                double maxi=-1;
                real missingval = -1;
                for(map<real,StatsCollectorCounts>::iterator it = sc[i].getCounts()->begin(); it!=sc[i].getCounts()->end(); ++it)
                    if(it->second.n > maxi)
                    {
                        maxi=it->second.n;
                        missingval=it->first;
                    }
                if(maxi<10)
                    // The most frequent value appears less than 10 times: a missing value is replaced by the mean.
                    missingval=sc[i].mean();
                else {
                    // We replace a missing value by the most frequent value.
                    // cout << i << ": maxi >= 10, and missingval = " << missingval << endl;
                }

                out << "isnan " << missingval << " @" << vm->fieldName(i) << " ifelse ";
            }

            // Uniformize all fields when 'uniformize' is set to 2.
            bool to_uniformize = (uniformize == 2);
            // If this field violates the normal assumption, and the user set the
            // 'uniformize' option to 1, then we should keep this field intact, and
            // remember it will need to be uniformized in the final vmat.
            bool apply_normalization = true;
            if (uniformize == 1) {
                real max = sc[i].max();
                real min = sc[i].min();
                real mu = sc[i].mean();
                real sigma = sc[i].stddev();
                int nsamp = (int) sc[i].nnonmissing();
                real confidence = 0.05;
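                // Rationale: if the nsamp values were i.i.d. N(mu, sigma^2),
                // the probability that all of them fall below mu + alpha*sigma
                // is Phi(alpha)^nsamp. With alpha = Phi^{-1}((1-confidence)^(1/nsamp))
                // this probability is exactly 1 - confidence, so a max (or min)
                // outside [mu - alpha*sigma, mu + alpha*sigma] is evidence
                // against normality at level ~confidence.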
                real alpha = gauss_01_quantile(pow((1 - confidence), 1 / real(nsamp)));
                if ( (max - mu) / sigma > alpha  || (min - mu) / sigma < - alpha) {
                    // Normal assumption violated.
                    to_uniformize = true;
                }
            }
            if (to_uniformize) {
                action ^= NORMALIZE;    // Remove the 'normalize' action.
                action |= UNIFORMIZE;   // And add the 'uniformize' one.
                apply_normalization = false;
                out << ":" << vm->fieldName(i) << endl;
                need_to_be_uniformized.append(inputsize);
            }

            // And apply normalization if we still need to do it.
            if (apply_normalization) {
                real mu = sc[i].mean();
                real sigma = sc[i].stddev();
                out << mu << " - " << sigma << " / :" << vm->fieldName(i)<<"\n";
            }

            // Increase the counter of inputs.
            inputsize++;
        }

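        // The ONEHOT branch first drops values seen fewer than n_enough times,
        // then emits a RealMapping followed by the VPL 'onehot' operator,
        // producing one 0/1 column per remaining value.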
        int n_discarded = 0;
        if(action&ONEHOT) {
            // First see if any value must be discarded, because not present often
            // enough in the dataset.
            int k = 0;
            TVec<bool> to_be_included(count);
            for (int j = 0; j < count; j++) {
                to_be_included[j] = true;
            }
            for(map<real,StatsCollectorCounts>::iterator it = sc[i].getCounts()->begin(); k<((int)sc[i].getCounts()->size()) - 1; ++it) {
                if (it->second.n < n_enough) {
                    to_be_included[k] = false;
                    n_discarded++;
                    // cout << "Field " << i << ": value " << it->first
                    //     << " discarded (n = " << it->second.n << ")." << endl;
                }
                k++;
            }
            if (n_discarded <= count - 1) {
                // We only consider this field if there is at least 1 class left.
                // TODO TMP
//        RealMapping rm = sc[i].getBinMapping(1,1);
//        out << "@" << vm->fieldName(i) << " " << rm << " onehot :"
//            << vm->fieldName(i) << ":0:" << rm.size() << endl;
                real tol = 0;
                if (may_be_fraction) {
                    // We need to take a margin because of floating point precision.
                    tol = DISCRETE_TOLERANCE;
                }
                RealMapping rm = sc[i].getAllValuesMapping(&to_be_included, 0, true, tol);
                out << "@"<<vm->fieldName(i) <<" " << rm << " "
                    << rm.size() << " onehot :"
                    << vm->fieldName(i)<<"_:0:"<< (rm.size() - 1) << endl;
/*        out << "@"<<vm->fieldName(i) <<" " << sc[i].getAllValuesMapping(&to_be_included, 0, true) << " "
          << count - n_discarded << " onehot :"
          << vm->fieldName(i)<<"_:0:"<<(count - 1 - n_discarded) << endl; */
                inputsize += count - n_discarded;
            }
        }

        if(action&MISSING_BIT)
        {
            out<<"@"<<vm->fieldName(i)<<" isnan 1 0 ifelse :"<<vm->fieldName(i)<<"_mbit\n";
            inputsize++;
        }

        report<<tostring(i)+" ("+vm->fieldName(i)+") [c="<<count<<" nm="<<sc[i].nnonmissing()<<"] ";
        if(action==0) report<<"~~user intervention required: "<<message;
        if(action&NORMALIZE) {
            report << "NORMALIZE ";
/*      if (countlog > 0) {
        report << "(after " << countlog << " log) ";
        }*/
        }
        if (action & UNIFORMIZE) report << "UNIFORMIZE ";
        if (action&ONEHOT) report<<"ONEHOT("<<count<<") - discarded: " << n_discarded << " ";
        if (type==discrete_corr) report<<"correl: "<<correlation<<" 2tail-student:"<<student<<" ";
        if (action&MISSING_BIT) report<<"MISSING_BIT ";
        if (action&SKIP) report<<"SKIP ";
        if (additional_proc[i] != "") {
            // There is an additional processing to add after this field.
            out << additional_proc[i] << endl;
            inputsize += additional_proc_size[i];
            report << "ADD_PROC ";
        }
        report<<endl;

        pb->update(i);

    }

    // Add the target.
    out << "%" << target << " :target\n</PROCESSING>"<<endl;

    // Add the sizes.
    out << endl  << "<SIZES>"  << endl
        << inputsize  << endl // inputsize
        << "1"        << endl // targetsize
        << "0"        << endl // weightsize
        << "</SIZES>" << endl;

    // Now build the uniformized VMatrix if 'uniformize' has been set.
    if (uniformize > 0) {
        // Prepare the 'shift' and 'scale' vectors to map uniformized fields to
        // [-1,1] instead of the default [0,1].
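        // With shift = -0.5 and scale = 2, a uniformized value u in [0,1] is
        // mapped to (u - 0.5) * 2, which lies in [-1,1] (assuming
        // ShiftAndRescaleVMatrix applies the shift before the scale, as the
        // option order below suggests).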
        Vec shift(inputsize + 1);  // +1 because of the target.
        Vec scale(inputsize + 1);
        shift.fill(0);
        scale.fill(1);
        for (int i = 0; i < need_to_be_uniformized.length(); i++) {
            shift[need_to_be_uniformized[i]] = -0.5;
            scale[need_to_be_uniformized[i]] = 2;
        }
        // Write the .vmat file.
        out_uni << "# Preprocessed VMat" << endl;
        out_uni << "<SOURCES>" << endl;
        out_uni << "@" << endl
                << "ShiftAndRescaleVMatrix(" << endl
                << "  automatic = 0" << endl
                << "  shift = [" << shift << "]" << endl
                << "  scale = [" << scale << "]" << endl
                << "  underlying_vmat =" << endl;
        out_uni << "   PLearnerOutputVMatrix(" << endl;
        out_uni << "     train_learners = 1" << endl;
        out_uni << "     data = AutoVMatrix(specification = \"" << filename_non_uni << "\")" << endl;
        out_uni << "     learners = [" << endl;
        out_uni << "       UniformizeLearner(" << endl;
        out_uni << "         which_fieldnums = ";
        out_uni << "[ " << need_to_be_uniformized << "]" << endl;
        out_uni << "       )" << endl;
        out_uni << "     ]" << endl;
        out_uni << "   )" << endl
                << ")" << endl;
        out_uni << "</SOURCES>" << endl << endl;
        out_uni << "<SIZES>"  << endl
                << inputsize  << endl // inputsize
                << "1"        << endl // targetsize
                << "0"        << endl // weightsize
                << "</SIZES>" << endl;
    }

    // Possibly add the <PRECOMPUTE> tag.
    if (precompute != "none") {
        out << endl << "<PRECOMPUTE>" << endl << precompute << endl << "</PRECOMPUTE>" << endl;
        if (uniformize > 0) {
            out_uni << endl << "<PRECOMPUTE>" << endl << precompute << endl << "</PRECOMPUTE>" << endl;
        }
    }
}

///////////////////////
// stringToFieldType //
///////////////////////
PLearn::FieldConvertCommand::FieldType FieldConvertCommand::stringToFieldType(string s) {
    if (s.find("continuous") != string::npos)
        return continuous;
    else if (s.find("discrete_uncorr") != string::npos)
        return discrete_uncorr;
    else if (s.find("discrete_corr") != string::npos)
        return discrete_corr;
    else if (s.find("constant") != string::npos)
        return constant;
    else if (s.find("binary") != string::npos)
        return binary;
    else if (s.find("skip") != string::npos)
        return skip;
    else {
        PLERROR("In FieldConvertCommand::stringToFieldType - Unknown field type: %s", s.c_str());
        return skip;
    }
}


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :