// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2001 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2002 Pascal Vincent, Julien Keable, Xavier Saint-Mleux
// Copyright (C) 2003 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************************
   * $Id: CovariancePreservationImputationVMatrix.cc 3658 2005-07-06 20:30:15  Godbout $
   ******************************************************************* */


#include "CovariancePreservationImputationVMatrix.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
  CovariancePreservationImputationVMatrix,
  "VMat class to impute values preserving the observed relationships between variables on a global basis.",
  "This class replaces a missing value in the underlying dataset with a value computed to minimize\n"
  "the distance between the sample covariates and the global covariance vector of the observed data.\n"
  );
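
// Rough usage sketch (hypothetical .plearn snippet; AutoVMatrix and the exact script
// syntax are assumptions not taken from this file, and the "source" option comes from
// the parent VMatrix class):
//
//   CovariancePreservationImputationVMatrix(
//       source = AutoVMatrix( specification = "data_with_missing.pmat" );
//       train_set = AutoVMatrix( specification = "train.pmat" );
//   )
//
// Means and covariances are estimated once from train_set and cached as
// "covariance_file.pmat" in its metadata directory; rows read through this VMat then
// have their missing input values imputed on the fly.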

CovariancePreservationImputationVMatrix::CovariancePreservationImputationVMatrix()
{
}

CovariancePreservationImputationVMatrix::~CovariancePreservationImputationVMatrix()
{
}

void CovariancePreservationImputationVMatrix::declareOptions(OptionList &ol)
{

  declareOption(ol, "train_set", &CovariancePreservationImputationVMatrix::train_set, OptionBase::buildoption, 
                "A referenced train set.\n"
                "The covariance imputation is computed with the observed values in this data set.\n");

  inherited::declareOptions(ol);
}

void CovariancePreservationImputationVMatrix::build()
{
  inherited::build();
  build_();
}

void CovariancePreservationImputationVMatrix::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
  deepCopyField(train_set, copies);
  inherited::makeDeepCopyFromShallowCopy(copies);
}

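// Fills input/target/weight for row i from the source VMat, replacing any missing
// input value with its covariance-preserving imputation.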
void CovariancePreservationImputationVMatrix::getExample(int i, Vec& input, Vec& target, real& weight)
{
  source->getExample(i, input, target, weight);
  for (int source_col = 0; source_col < input.length(); source_col++)
  {
    if (is_missing(input[source_col])) input[source_col] = computeImputation(i, source_col, input);
  }
}

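// Returns element (i, j) of the source VMat, imputing it if it is missing.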
real CovariancePreservationImputationVMatrix::get(int i, int j) const
{ 
  real variable_value = source->get(i, j);
  if (is_missing(variable_value)) variable_value = computeImputation(i, j);
  return variable_value;
}

void CovariancePreservationImputationVMatrix::put(int i, int j, real value)
{
  PLERROR("In CovariancePreservationImputationVMatrix::put not implemented");
}

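// Fills v with the sub-row of width v.length() starting at column j of row i,
// imputing missing values (the column offset j is added back when indexing the covariances).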
void CovariancePreservationImputationVMatrix::getSubRow(int i, int j, Vec v) const
{  
  source->getSubRow(i, j, v);
  for (int source_col = 0; source_col < v.length(); source_col++) 
    if (is_missing(v[source_col])) v[source_col] = computeImputation(i, source_col + j);
}

void CovariancePreservationImputationVMatrix::putSubRow(int i, int j, Vec v)
{
  PLERROR("In CovariancePreservationImputationVMatrix::putSubRow not implemented");
}

void CovariancePreservationImputationVMatrix::appendRow(Vec v)
{
  PLERROR("In CovariancePreservationImputationVMatrix::appendRow not implemented");
}

void CovariancePreservationImputationVMatrix::insertRow(int i, Vec v)
{
  PLERROR("In CovariancePreservationImputationVMatrix::insertRow not implemented");
}

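// Fills v with row i of the source VMat, imputing missing values from the
// observed values of that row.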
void CovariancePreservationImputationVMatrix::getRow(int i, Vec v) const
{  
  source->getRow(i, v);
  for (int source_col = 0; source_col < v.length(); source_col++)
    if (is_missing(v[source_col])) v[source_col] = computeImputation(i, source_col, v);
}

void CovariancePreservationImputationVMatrix::putRow(int i, Vec v)
{
  PLERROR("In CovariancePreservationImputationVMatrix::putRow not implemented");
}

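// Fills v with column i of the source VMat, imputing missing values row by row.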
void CovariancePreservationImputationVMatrix::getColumn(int i, Vec v) const
{  
  source->getColumn(i, v);
  for (int source_row = 0; source_row < v.length(); source_row++)
    if (is_missing(v[source_row])) v[source_row] = computeImputation(source_row, i);
}


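// Checks that the train set and source VMats are compatible, copies the source
// geometry and field names, then loads the covariance statistics from the train set's
// metadata directory, computing and caching them on the first call.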
void CovariancePreservationImputationVMatrix::build_()
{
    if (!train_set || !source) PLERROR("In CovariancePreservationImputationVMatrix::build_() - train set and source vmat must be supplied");
    train_length = train_set->length();
    if (train_length < 1) PLERROR("In CovariancePreservationImputationVMatrix::build_() - the train set must contain at least 1 sample, got: %i", train_length);
    train_width = train_set->width();
    train_targetsize = train_set->targetsize();
    train_weightsize = train_set->weightsize();
    train_inputsize = train_set->inputsize();
    if (train_inputsize < 1) PLERROR("In CovariancePreservationImputationVMatrix::build_() - inputsize of the train vmat must be supplied, got: %i", train_inputsize);
    source_width = source->width();
    source_targetsize = source->targetsize();
    source_weightsize = source->weightsize();
    source_inputsize = source->inputsize();
    if (train_width != source_width) PLERROR("In CovariancePreservationImputationVMatrix::build_() - train set and source width must agree, got: %i, %i", train_width, source_width);
    if (train_targetsize != source_targetsize) PLERROR("In CovariancePreservationImputationVMatrix::build_() - train set and source targetsize must agree, got: %i, %i", train_targetsize, source_targetsize);
    if (train_weightsize != source_weightsize) PLERROR("In CovariancePreservationImputationVMatrix::build_() - train set and source weightsize must agree, got: %i, %i", train_weightsize, source_weightsize);
    if (train_inputsize != source_inputsize) PLERROR("In CovariancePreservationImputationVMatrix::build_() - train set and source inputsize must agree, got: %i, %i", train_inputsize, source_inputsize);
    train_field_names.resize(train_width);
    train_field_names = train_set->fieldNames();
    source_length = source->length();
    length_ = source_length;
    width_ = source_width;
    inputsize_ = source_inputsize;
    targetsize_ = source_targetsize;
    weightsize_ = source_weightsize;
    declareFieldNames(train_field_names);
    train_metadata = train_set->getMetaDataDir();
    covariance_file_name = train_metadata + "covariance_file.pmat";
    cov.resize(train_width, train_width);
    mu.resize(train_width);
    if (!isfile(covariance_file_name))
    {
        computeCovariances();
        createCovarianceFile();
    }
    else loadCovarianceFile();
}

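// Saves the covariance matrix (first train_width rows) and the means (last row)
// to covariance_file.pmat in the train set's metadata directory.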
void CovariancePreservationImputationVMatrix::createCovarianceFile()
{
    covariance_file = new FileVMatrix(covariance_file_name, train_width + 1, train_field_names);
    for (indj = 0; indj < train_width; indj++)
    {
        for (indk = 0; indk < train_width; indk++)
        {
            covariance_file->put(indj, indk, cov(indj, indk));
        }
    }
    for (indk = 0; indk < train_width; indk++)
    {
        covariance_file->put(train_width, indk, mu[indk]);
    }
}

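// Reloads the cached covariance matrix and means saved by createCovarianceFile().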
void CovariancePreservationImputationVMatrix::loadCovarianceFile()
{
    covariance_file = new FileVMatrix(covariance_file_name);
    for (indj = 0; indj < train_width; indj++)
    {
        for (indk = 0; indk < train_width; indk++)
        {
            cov(indj, indk) = covariance_file->get(indj, indk);
        }
    }
    for (indk = 0; indk < train_width; indk++)
    {
        mu[indk] = covariance_file->get(train_width, indk);
    }
}

VMat CovariancePreservationImputationVMatrix::getCovarianceFile()
{
    return covariance_file;
}

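// Estimates means and pairwise covariances from the train set, using for each pair
// of variables only the rows where both values are observed.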
void CovariancePreservationImputationVMatrix::computeCovariances()
{
/*
    We need to populate the covariance matrix for all pairs of input variables.
    In one pass over the train set we accumulate three d x d matrices:
    n(j, k):        the number of samples where x(i, j) and x(i, k) are simultaneously observed;
    sum_x(j)(k):    the sum of the x(i, j) values where x(i, j) and x(i, k) are simultaneously observed;
    sum_x(j)_x(k):  the sum of the x(i, j) * x(i, k) values where x(i, j) and x(i, k) are simultaneously observed.
    We can then calculate mu(k) = sum_x(k)(k) / n(k, k) and
    COV(j, k) = (sum_x(j)_x(k) - sum_x(j)(k) * mu(k) - sum_x(k)(j) * mu(j)) / n(j, k) + mu(j) * mu(k).
    All we need afterwards are the COV matrix and mu to impute the missing values.
*/
    n_obs.resize(train_width, train_width);
    sum_xj.resize(train_width, train_width);
    sum_xj_xk.resize(train_width, train_width);
    train_input.resize(train_width);
    n_obs.clear();
    sum_xj.clear();
    sum_xj_xk.clear();
    mu.clear();
    cov.clear();
    ProgressBar* pb = new ProgressBar("Computing the covariance matrix", train_length);
    for (train_row = 0; train_row < train_length; train_row++)
    {
        train_set->getRow(train_row, train_input);
        for (indj = 0; indj < train_width; indj++)
        {
            for (indk = 0; indk < train_width; indk++)
            {
                if (is_missing(train_input[indj]) || is_missing(train_input[indk])) continue;
                n_obs(indj, indk) += 1.0;
                sum_xj(indj, indk) += train_input[indj];
                sum_xj_xk(indj, indk) += train_input[indj] * train_input[indk];
            }
        }
        pb->update(train_row); 
    }
    delete pb;
    for (indj = 0; indj < train_width; indj++)
    {
        mu[indj] = sum_xj(indj, indj) / n_obs(indj, indj); 
    }
    for (indj = 0; indj < train_width; indj++)
    {
        for (indk = 0; indk < train_width; indk++)
        {
            cov(indj, indk) = sum_xj_xk(indj, indk) - sum_xj(indj, indk) * mu[indk] - sum_xj(indk, indj) * mu[indj];
            cov(indj, indk) = (cov(indj, indk) / n_obs(indj, indk)) + mu[indk] * mu[indj];
        }
    }
}

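// Convenience overload: fetches row 'row' from the source VMat, then imputes column 'col'.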
real CovariancePreservationImputationVMatrix::computeImputation(int row, int col) const
{
    Vec input(source_width);
    source->getRow(row, input);
    return computeImputation(row, col, input);
}

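// Imputes x(row, col) from the observed covariates of the row:
//   x_hat(col) = mu(col) + sum_l cov(l, col) * (x(l) - mu(l)) / sum_l (x(l) - mu(l))^2
// where the sums run over the non-missing columns l. If no observed covariate deviates
// from its mean, the column mean is returned.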
real CovariancePreservationImputationVMatrix::computeImputation(int row, int col, Vec input) const
{
    real sum_cov_xl = 0;
    real sum_xl_square = 0;
    for (int indl = 0; indl < source_width; indl++)
    {
        if (is_missing(input[indl])) continue;
        sum_cov_xl += cov(indl, col) * (input[indl] - mu[indl]);
        sum_xl_square += (input[indl] - mu[indl]) * (input[indl] - mu[indl]);
    }
    if (sum_xl_square == 0.0) return mu[col];
    return mu[col] + sum_cov_xl / sum_xl_square;
}

} // end of namespace PLearn