LinearRegressor.cc

// -*- C++ -*-

// LinearRegressor.cc
//
// Copyright (C) 2003  Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: LinearRegressor.cc 9719 2008-11-25 16:47:57Z tihocan $
 ******************************************************* */

#include "LinearRegressor.h"
#include <plearn/vmat/VMat_linalg.h>
#include <plearn/vmat/ExtendedVMatrix.h>
#include <plearn/math/pl_erf.h>

namespace PLearn {
using namespace std;

/* ### Initialise all fields to their default value here */
LinearRegressor::LinearRegressor()
    : sum_squared_y(MISSING_VALUE),
      sum_gammas(MISSING_VALUE),
      weights_norm(MISSING_VALUE),
      weights(),
      AIC(MISSING_VALUE),
      BIC(MISSING_VALUE),
      resid_variance(),
      include_bias(true),
      cholesky(true),
      weight_decay(0.0),
      output_learned_weights(false)
{ }

PLEARN_IMPLEMENT_OBJECT(
    LinearRegressor,
    "Ordinary Least Squares and Ridge Regression, optionally weighted",
    "This class performs OLS (Ordinary Least Squares) and Ridge Regression, optionally on weighted\n"
    "data, by solving the linear equation (X'W X + weight_decay*n_examples*I) theta = X'W Y\n"
    "where X is the (n_examples x (1+inputsize)) matrix of extended inputs \n"
    "(with a 1 in the first column, only if the option 'include_bias' is true),\n"
    "Y is the (n_examples x targetsize) matrix of targets, W is a diagonal matrix of weights (one per example)\n"
    "{the identity matrix if weightsize()==0 in the training set}, and theta is the resulting\n"
    "set of parameters. W_{ii} is obtained from the weight column of the training set, if any.\n"
    "This column must have width 0 (no weight) or 1.\n"
    "A prediction (computeOutput) is obtained from an input vector as follows:\n"
    "   output = theta * (1,input)\n"
    "The criterion that is minimized by solving the above linear system is the squared loss\n"
    "plus squared norm penalty (weight_decay*sum_{ij} theta_{ij}^2) PER EXAMPLE. This class also measures\n"
    "the ordinary squared loss (||output-target||^2). The two costs are named 'mse+penalty' and 'mse' respectively.\n"
    "Training has two steps: (1) computing X'W X and X' W Y, (2) solving the linear system.\n"
    "The first step takes time O(n_examples*inputsize^2 + n_examples*inputsize*outputsize).\n"
    "The second step takes time O(inputsize^3).\n"
    "If train() is called repeatedly with different values of weight_decay, without intervening\n"
    "calls to forget(), then the first step will be done only once, and only the second step\n"
    "is repeated.\n"
    "\n"
    "The Akaike Information Criterion (AIC) and Bayesian Information Criterion (BIC)\n"
    "are computed on the training set.  They are output as both training and test costs,\n"
    "with respective cost names \"aic\" and \"bic\".  Their arithmetic mean is also output\n"
    "under cost name \"mabic\".  Since these criteria are TRAINING concepts, the\n"
    "test costs that are output are CONSTANT and equal to the training costs.\n"
    );
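
// A rough usage sketch of this learner through the standard PLearner
// interface (the names 'trainvm' and 'test_input' are placeholders, and the
// option values are arbitrary):
//
//     PP<LinearRegressor> regressor = new LinearRegressor();
//     regressor->weight_decay = 1e-3;        // ridge penalty per example
//     regressor->build();
//     regressor->setTrainingSet(trainvm);    // VMat with input, target (and optional weight) columns
//     regressor->train();                    // accumulates X'WX and X'WY, then solves
//     Vec prediction(regressor->outputsize());
//     regressor->computeOutput(test_input, prediction);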

void LinearRegressor::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    //#####  Build Options  ####################################################

    declareOption(ol, "include_bias", &LinearRegressor::include_bias,
                  OptionBase::buildoption,
                  "Whether to include a bias term in the regression (true by default)");

    declareOption(ol, "cholesky", &LinearRegressor::cholesky,
                  OptionBase::buildoption,
                  "Whether to use the Cholesky decomposition or not, "
                  "when solving the linear system. Default=1 (true)");

    declareOption(ol, "weight_decay", &LinearRegressor::weight_decay,
                  OptionBase::buildoption,
                  "The weight decay is the factor that multiplies the "
                  "squared norm of the parameters in the loss function");

    declareOption(ol, "output_learned_weights",
                  &LinearRegressor::output_learned_weights,
                  OptionBase::buildoption,
                  "If true, the result of the computeOutput*() functions is not the\n"
                  "result of the regression, but the learned regression parameters\n"
                  "(i.e. the matrix 'weights').  The matrix is flattened by rows.\n"
                  "NOTE by Nicolas Chapados: this option is a bit of a hack and might\n"
                  "be removed in the future.  Let me know if you come to rely on it.");


    //#####  Learnt Options  ###################################################

    declareOption(ol, "weights", &LinearRegressor::weights,
                  OptionBase::learntoption,
                  "The weight matrix, whose entries are the parameters computed by "
                  "training the regressor.\n");

    declareOption(ol, "AIC", &LinearRegressor::AIC,
                  OptionBase::learntoption,
                  "The Akaike Information Criterion computed at training time;\n"
                  "Saved as a learned option to allow outputting AIC as a test cost.");

    declareOption(ol, "BIC", &LinearRegressor::BIC,
                  OptionBase::learntoption,
                  "The Bayesian Information Criterion computed at training time;\n"
                  "Saved as a learned option to allow outputting BIC as a test cost.");

    declareOption(ol, "resid_variance", &LinearRegressor::resid_variance,
                  OptionBase::learntoption,
                  "Estimate of the residual variance for each output variable.\n"
                  "Saved as a learned option to allow outputting confidence intervals\n"
                  "when the model is reloaded and used in test mode.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    // Unused options.

    redeclareOption(ol, "seed", &LinearRegressor::seed_, OptionBase::nosave,
                    "The random seed is not used in a linear regressor.");
}

void LinearRegressor::build_()
{
    // This resets various accumulators to speed up successive iterations of
    // training in the case the training set has not changed.
    resetAccumulators();
}

// ### Nothing to add here, simply calls build_
void LinearRegressor::build()
{
    inherited::build();
    build_();
}

void LinearRegressor::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    deepCopyField(extendedinput, copies);
    deepCopyField(input, copies);
    deepCopyField(train_costs, copies);
    deepCopyField(XtX, copies);
    deepCopyField(XtY, copies);
    deepCopyField(weights, copies);
    deepCopyField(resid_variance, copies);
}


int LinearRegressor::outputsize() const
{
    // If we output the learned parameters, the outputsize is the number of
    // parameters
    if (output_learned_weights)
        return max(effective_inputsize() * targetsize(), -1);

    int ts = targetsize();
    if (ts >= 0) {
        return ts;
    } else {
        // This learner's training set probably hasn't been set yet, so
        // we don't know the targetsize.
        return 0;
    }
}

void LinearRegressor::resetAccumulators()
{
    XtX.resize(0,XtX.width());
    XtY.resize(0,XtY.width());
    sum_squared_y = 0;
    sum_gammas = 0;
}

void LinearRegressor::forget()
{
    resetAccumulators();
    resid_variance.resize(0);
}

void LinearRegressor::train()
{
    if(targetsize()<=0)
        PLERROR("In LinearRegressor::train() - Targetsize (%d) must be "
                "positive", targetsize());

    // Preparatory buffer allocation
    bool recompute_XXXY = (XtX.length()==0);
    if (recompute_XXXY)
    {
        XtX.resize(effective_inputsize(), effective_inputsize());
        XtY.resize(effective_inputsize(), targetsize());
    }
    if(!train_stats)  // make a default stats collector, in case there's none
        train_stats = new VecStatsCollector();

    train_stats->setFieldNames(getTrainCostNames());
    train_stats->forget();

    // Compute training inputs and targets; take into account optional bias
    real squared_error=0;
    Vec outputwise_sum_squared_Y;
    VMat trainset_inputs  = train_set.subMatColumns(0, inputsize());
    VMat trainset_targets = train_set.subMatColumns(inputsize(), targetsize());
    if (include_bias)                          // prepend a first column of ones
        trainset_inputs = new ExtendedVMatrix(trainset_inputs,0,0,1,0,1.0);

    // Choose proper function depending on whether the dataset is weighted
    weights.resize(effective_inputsize(), targetsize());
    if (train_set->weightsize()<=0)
    {
        squared_error =
            linearRegression(trainset_inputs, trainset_targets,
                             weight_decay, weights,
                             !recompute_XXXY, XtX, XtY,
                             sum_squared_y, outputwise_sum_squared_Y,
                             true, report_progress?verbosity:0,
                             cholesky, include_bias?1:0);
    }
    else if (train_set->weightsize()==1)
    {
        squared_error =
            weightedLinearRegression(trainset_inputs, trainset_targets,
                                     train_set.subMatColumns(inputsize()+targetsize(),1),
                                     weight_decay, weights,
                                     !recompute_XXXY, XtX, XtY, sum_squared_y, outputwise_sum_squared_Y,
                                     sum_gammas, true, report_progress?verbosity:0,
                                     cholesky, include_bias?1:0);
    }
    else
        PLERROR("LinearRegressor: expected dataset's weightsize to be either 1 or 0, got %d\n",
                train_set->weightsize());

    // Update the AIC and BIC criteria
    computeInformationCriteria(squared_error, train_set.length());

    // Update the sigmas for confidence intervals (the current formula does
    // not account for the weights in the case of weighted linear regression)
    computeResidualsVariance(outputwise_sum_squared_Y);

    // Update the training costs
    Mat weights_excluding_biases = weights.subMatRows(include_bias? 1 : 0, inputsize());
    weights_norm = dot(weights_excluding_biases,weights_excluding_biases);
    // Same ordering as getTrainCostNames(): mse+penalty, mse, aic, bic, mabic
    train_costs.resize(5);
    train_costs[0] = squared_error + weight_decay*weights_norm;
    train_costs[1] = squared_error;
    train_costs[2] = AIC;
    train_costs[3] = BIC;
    train_costs[4] = (AIC+BIC)/2;
    train_stats->update(train_costs);
    train_stats->finalize();
}


void LinearRegressor::computeOutput(const Vec& actual_input, Vec& output) const
{
    // If 'output_learned_weights', don't compute the linear regression at
    // all, but instead flatten the weights matrix and output it
    if (output_learned_weights) {
        output << weights.toVec();
        return;
    }

    // Compute the output from the input.  Note that 'input' is an alias into
    // 'extendedinput': when a bias is included, extendedinput[0] is set to 1
    // and 'input' refers to the remaining inputsize() entries, so copying
    // actual_input into 'input' fills the extended vector in place.
    extendedinput.resize(effective_inputsize());
    input = extendedinput;
    if (include_bias) {
        input = extendedinput.subVec(1,inputsize());
        extendedinput[0] = 1.0;
    }
    input << actual_input;
    output.resize(outputsize());
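    // 'weights' is (effective_inputsize x targetsize), so this computes
    // output = weights' * extendedinput, i.e. output = theta * (1,input)
    // as described in the class help.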
    transposeProduct(output,weights,extendedinput);
}

void LinearRegressor::computeCostsFromOutputs(
    const Vec& /*input*/, const Vec& output, const Vec& target, Vec& costs) const
{
    // If 'output_learned_weights', there is no test cost
    if (output_learned_weights)
        return;

    // Compute the costs from the *already* computed output.
    costs.resize(5);
    real squared_loss = powdistance(output,target);
    costs[0] = squared_loss + weight_decay*weights_norm;
    costs[1] = squared_loss;

    // The AIC/BIC/MABIC costs are computed at TRAINING-TIME and remain
    // constant thereafter.  Simply append the already-computed costs.
    costs[2] = AIC;
    costs[3] = BIC;
    costs[4] = (AIC+BIC)/2;
}

bool LinearRegressor::computeConfidenceFromOutput(
    const Vec&, const Vec& output, real probability,
    TVec< pair<real,real> >& intervals) const
{
    // The option 'output_learned_weights' is incompatible with confidence intervals
    if (output_learned_weights)
        PLERROR("LinearRegressor::computeConfidenceFromOutput: the option "
                "'output_learned_weights' is incompatible with confidence.");

    const int n = output.size();
    if (n != resid_variance.size())
        PLERROR("LinearRegressor::computeConfidenceFromOutput: output vector "
                "size (=%d) does not match the residual variance vector size (=%d), "
                "or the residual variance has not yet been computed",
                n, resid_variance.size());

    // Two-tailed interval
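    // The half-width of the two-sided interval at level 'probability' is the
    // (1+probability)/2 quantile of the standard normal times the residual
    // standard deviation; e.g. probability=0.95 gives a multiplier of about 1.96.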
    const real multiplier = gauss_01_quantile((1+probability)/2);
    intervals.resize(n);
    for (int i=0; i<n; ++i) {
        real half_width = multiplier * sqrt(resid_variance[i]);
        intervals[i] = std::make_pair(output[i] - half_width,
                                      output[i] + half_width);
    }
    return true;
}

TVec<string> LinearRegressor::getTestCostNames() const
{
    // If 'output_learned_weights', there is no test cost
    if (output_learned_weights)
        return TVec<string>();
    else
        return getTrainCostNames();
}

TVec<string> LinearRegressor::getTrainCostNames() const
{
    // Return the names of the objective costs that the train method computes
    // and for which it updates the VecStatsCollector train_stats
    TVec<string> names;
    names.push_back("mse+penalty");
    names.push_back("mse");
    names.push_back("aic");
    names.push_back("bic");
    names.push_back("mabic");
    return names;
}

void LinearRegressor::computeInformationCriteria(real squared_error, int n)
{
    // AIC = ln(squared_error/n) + 2*M/n
    // BIC = ln(squared_error/n) + M*ln(n)/n,
    // where M is the number of parameters
    // NOTE the change in semantics: squared_error is now a MEAN squared error

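    // For instance, with a mean squared error of 0.25, M = 5 parameters and
    // n = 100 training examples:
    //   AIC = ln(0.25) + 2*5/100       ~ -1.29
    //   BIC = ln(0.25) + 5*ln(100)/100 ~ -1.16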
    real M = weights.length() * weights.width();
    real lnsqerr = pl_log(squared_error);
    AIC = lnsqerr + 2*M/n;
    BIC = lnsqerr + M*pl_log(real(n))/n;
}

void LinearRegressor::computeResidualsVariance(const Vec&
                                               outputwise_sum_squared_Y)
{
    // The following formula (for the unweighted case) is used:
    //
    //    e'e = y'y - b'X'Xb
    //
    // where e is the vector of residuals of the regression (for a single
    // output), y is a column of targets (for a single output), b is the
    // weights vector, and X is the matrix of regressors.  From this point,
    // use the fact that an estimator of sigma^2 is given by
    //
    //   sigma_squared = e'e / (N-K),
    //
    // where N is the size of the training set and K is the extended input
    // size (i.e. the length of the b vector).
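    //
    // (Why e'e = y'y - b'X'Xb: with e = y - Xb, e'e = y'y - 2b'X'y + b'X'Xb,
    // and the normal equations X'Xb = X'y, which hold exactly when
    // weight_decay is zero, collapse the last two terms into -b'X'Xb.)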
    const int ninputs  = weights.length();
    const int ntargets = weights.width();
    const int N = train_set.length();

    Vec b(ninputs);
    Vec XtXb(ninputs);
    resid_variance.resize(ntargets);

    for (int i=0; i<ntargets; ++i) {
        b << weights.column(i);
        product(XtXb, XtX, b);
        resid_variance[i] =
            (outputwise_sum_squared_Y[i] - dot(b,XtXb)) / (N-ninputs);
    }
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :