regressors/GaussianProcessRegressor.cc
// -*- C++ -*-

// GaussianProcessRegressor.cc
//
// Copyright (C) 2006-2009 Nicolas Chapados
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id: .pyskeleton_header 544 2003-09-01 00:05:31Z plearner $
   ******************************************************* */

// Authors: Nicolas Chapados

#define PL_LOG_MODULE_NAME "GaussianProcessRegressor"

// From PLearn
#include "GaussianProcessRegressor.h"
#include <plearn/base/stringutils.h>
#include <plearn/vmat/ExtendedVMatrix.h>
#include <plearn/math/pl_erf.h>
#include <plearn/var/GaussianProcessNLLVariable.h>
#include <plearn/var/ObjectOptionVariable.h>
#include <plearn/opt/Optimizer.h>
#include <plearn/io/pl_log.h>

#ifdef USE_BLAS_SPECIALISATIONS
#include <plearn/math/plapack.h>
#endif

namespace PLearn {
using namespace std;
PLEARN_IMPLEMENT_OBJECT(
    GaussianProcessRegressor,
    "Implements Gaussian Process Regression (GPR) with an arbitrary kernel",
    "Given a kernel K(x,y) = phi(x)'phi(y), where phi(x) is the projection of a\n"
    "vector x into feature space, this class implements a version of the ridge\n"
    "estimator, giving the prediction at x as\n"
    "\n"
    "    f(x) = k(x)'(M + lambda I)^-1 y,\n"
    "\n"
    "where x is the test vector at which to estimate the response, k(x) is the\n"
    "vector of kernel evaluations between the test vector and the elements of\n"
    "the training set, namely\n"
    "\n"
    "    k(x) = (K(x,x1), K(x,x2), ..., K(x,xN))',\n"
    "\n"
    "M is the Gram matrix on the elements of the training set, i.e. the matrix\n"
    "whose element (i,j) is equal to K(xi, xj), lambda is the VARIANCE of\n"
    "the observation noise (and can be interpreted as a weight decay\n"
    "coefficient), and y is the vector of training-set targets.\n"
    "\n"
    "The uncertainty in a prediction can be computed by calling\n"
    "computeConfidenceFromOutput.  Furthermore, if desired, this learner allows\n"
    "optimization of the kernel hyperparameters by direct optimization of the\n"
    "marginal likelihood w.r.t. the hyperparameters.  This mechanism relies on a\n"
    "user-provided Optimizer (see the 'optimizer' option) and does not rely on\n"
    "the PLearn HyperLearner system.\n"
    "\n"
    "GaussianProcessRegressor produces the following train costs:\n"
    "\n"
    "- \"nll\"          : the negative log-likelihood, averaged over the\n"
    "                   training set (available only if the option\n"
    "                   'compute_confidence' is true).\n"
    "- \"mse\"          : the mean-squared error on the training set (by\n"
    "                   convention, divided by two).\n"
    "- \"marginal-nll\" : the negative marginal log-likelihood on the training\n"
    "                   set (restricted to the active set when the\n"
    "                   projected-process approximation is used).\n"
    "\n"
    "and the following test costs:\n"
    "\n"
    "- \"nll\" : the negative log-likelihood of the test example under the\n"
    "          predictive distribution.  Available only if the option\n"
    "          'compute_confidence' is true.\n"
    "- \"mse\" : the squared error of the test example with respect to the\n"
    "          predictive mean (by convention, divided by two).\n"
    "\n"
    "The disadvantage of this learner is that its training time is O(N^3) in the\n"
    "number of training examples (due to the matrix inversion).  When saving the\n"
    "learner, the training set inputs must be saved, along with an additional\n"
    "matrix of length number-of-training-examples and width number-of-targets.\n"
    "\n"
    "To alleviate the computational bottleneck of the exact method, the sparse\n"
    "approximation method of Projected Process is also available.  This method\n"
    "requires identifying M datapoints in the training set called the active\n"
    "set, although it makes use of all N training points for computing the\n"
    "likelihood.  The computational complexity of the approach is then O(NM^2).\n"
    "Note that in the current implementation, hyperparameter optimization is\n"
    "performed using ONLY the active set (called the \"Subset of Data\" method in\n"
    "the Rasmussen & Williams book).  Making use of the full set of datapoints\n"
    "is more computationally expensive and would require substantial updates to\n"
    "the PLearn Kernel class (to efficiently support asymmetric kernel-matrix\n"
    "gradient).  This may come later.\n"
    );

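// Illustrative sketch (not part of the original file): the prediction
// equation above, f(x) = k(x)'(K + lambda I)^-1 y, spelled out on a tiny 1-D
// dataset with a hypothetical RBF kernel.  The data values and kernel choice
// are assumptions for the example only; PLearn itself solves the system with
// a Cholesky decomposition rather than the naive elimination shown here.
//
//     #include <cmath>
//     #include <cstdio>
//
//     static double rbf(double a, double b)            // hypothetical kernel
//     { return std::exp(-0.5 * (a - b) * (a - b)); }
//
//     int main()
//     {
//         const double x[3] = {0.0, 1.0, 2.0};         // training inputs
//         const double y[3] = {0.1, 0.9, 2.1};         // training targets
//         const double lambda = 1e-2;                  // noise variance
//
//         double A[3][4];                              // [K + lambda I | y]
//         for (int i = 0; i < 3; ++i) {
//             for (int j = 0; j < 3; ++j)
//                 A[i][j] = rbf(x[i], x[j]) + (i == j ? lambda : 0.0);
//             A[i][3] = y[i];
//         }
//
//         // Gaussian elimination; the regularized Gram matrix is positive
//         // definite, so no pivoting is required.
//         for (int k = 0; k < 3; ++k)
//             for (int i = k+1; i < 3; ++i) {
//                 double f = A[i][k] / A[k][k];
//                 for (int j = k; j < 4; ++j)
//                     A[i][j] -= f * A[k][j];
//             }
//         double alpha[3];                             // (K + lambda I)^-1 y
//         for (int i = 2; i >= 0; --i) {
//             alpha[i] = A[i][3];
//             for (int j = i+1; j < 3; ++j)
//                 alpha[i] -= A[i][j] * alpha[j];
//             alpha[i] /= A[i][i];
//         }
//
//         double xstar = 1.5, f = 0;                   // f(x*) = k(x*)' alpha
//         for (int i = 0; i < 3; ++i)
//             f += rbf(xstar, x[i]) * alpha[i];
//         std::printf("f(%g) = %g\n", xstar, f);
//         return 0;
//     }
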
GaussianProcessRegressor::GaussianProcessRegressor()
    : m_weight_decay(0.0),
      m_include_bias(true),
      m_compute_confidence(false),
      m_confidence_epsilon(1e-8),
      m_save_gram_matrix(false),
      m_solution_algorithm("exact")
{ }


void GaussianProcessRegressor::declareOptions(OptionList& ol)
{
    //#####  Build Options  ###################################################

    declareOption(
        ol, "kernel", &GaussianProcessRegressor::m_kernel,
        OptionBase::buildoption,
        "Kernel to use for the computation.  This must be a similarity kernel\n"
        "(i.e. closer vectors give higher kernel evaluations).");

    declareOption(
        ol, "weight_decay", &GaussianProcessRegressor::m_weight_decay,
        OptionBase::buildoption,
        "Weight decay coefficient (default = 0)");

    declareOption(
        ol, "include_bias", &GaussianProcessRegressor::m_include_bias,
        OptionBase::buildoption,
        "Whether to include a bias term in the regression (true by default).\n"
        "The effect of this option is NOT to prepend a column of 1's to the\n"
        "inputs (which often has no effect for GP regression), but to estimate\n"
        "a separate mean of the targets, perform the GP regression on the\n"
        "zero-mean targets, and add it back when computing the outputs.\n");

    declareOption(
        ol, "compute_confidence", &GaussianProcessRegressor::m_compute_confidence,
        OptionBase::buildoption,
        "Whether to perform the additional train-time computations required\n"
        "to compute confidence intervals.  This includes computing a separate\n"
        "inverse of the Gram matrix.  Specification of this option is necessary\n"
        "for calling both computeConfidenceFromOutput and computeOutputCovMat.\n");

    declareOption(
        ol, "confidence_epsilon", &GaussianProcessRegressor::m_confidence_epsilon,
        OptionBase::buildoption,
        "Small regularization to be added post-hoc to the computed output\n"
        "covariance matrix and confidence intervals; this is mostly used as a\n"
        "disaster prevention device, to avoid negative predictive variance\n");

00171         ol, "hyperparameters", &GaussianProcessRegressor::m_hyperparameters,
00172         OptionBase::buildoption,
00173         "List of hyperparameters to optimize.  They must be specified in the\n"
00174         "form \"option-name\":initial-value, where 'option-name' is the name\n"
00175         "of an option to set within the Kernel object (the array-index form\n"
00176         "'option[i]' is supported), and 'initial-value' is the\n"
00177         "(PLearn-serialization string representation) for starting point for the\n"
00178         "optimization.  Currently, the hyperparameters are constrained to be\n"
00179         "scalars.\n");
00180 
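    // A hedged usage sketch for the option above, as it might appear in a
    // plearn-script specification.  The kernel class name appears elsewhere
    // in this file, but the isp_signal_sigma / isp_global_sigma option names
    // are illustrative assumptions, not guaranteed to match any particular
    // Kernel subclass:
    //
    //     GaussianProcessRegressor(
    //         kernel = SquaredExponentialARDKernel(),
    //         optimizer = ConjGradientOptimizer(),
    //         hyperparameters = [ "isp_signal_sigma":"0.0",
    //                             "isp_global_sigma":"0.0" ]
    //     )
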
    declareOption(
        ol, "ARD_hyperprefix_initval",
        &GaussianProcessRegressor::m_ARD_hyperprefix_initval,
        OptionBase::buildoption,
        "If the kernel supports automatic relevance determination (ARD; e.g.\n"
        "SquaredExponentialARDKernel), the list of hyperparameters corresponding\n"
        "to each input can be created automatically by giving an option prefix\n"
        "and an initial value.  The ARD options are created to have the form\n"
        "\n"
        "   'prefix[0]', 'prefix[1]', ..., 'prefix[N-1]'\n"
        "\n"
        "where N is the number of inputs.  This option is useful when the\n"
        "dataset inputsize is not (easily) known ahead of time.\n");

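    // For example (written schematically; 'isp_input_sigma' is a hypothetical
    // ARD option of the kernel), setting
    //
    //     ARD_hyperprefix_initval = [ "isp_input_sigma", "0.0" ]
    //
    // on a dataset with inputsize 3 behaves as if 'isp_input_sigma[0]',
    // 'isp_input_sigma[1]' and 'isp_input_sigma[2]' had each been listed in
    // 'hyperparameters' with initial value "0.0".
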
    declareOption(
        ol, "optimizer", &GaussianProcessRegressor::m_optimizer,
        OptionBase::buildoption,
        "Specification of the optimizer to use for train-time hyperparameter\n"
        "optimization.  A ConjGradientOptimizer should be an adequate choice.\n");

    declareOption(
        ol, "save_gram_matrix", &GaussianProcessRegressor::m_save_gram_matrix,
        OptionBase::buildoption,
        "If true, the Gram matrix is saved before undergoing each Cholesky\n"
        "decomposition; useful for debugging if the matrix is quasi-singular.\n"
        "It is saved in the current expdir under the name 'gram_matrix_N.pmat',\n"
        "where N is an increasing counter.\n");

    declareOption(
        ol, "solution_algorithm", &GaussianProcessRegressor::m_solution_algorithm,
        OptionBase::buildoption,
        "Solution algorithm used for the regression.  If \"exact\", use the exact\n"
        "Gaussian process solution (requires O(N^3) computation).  If\n"
        "\"projected-process\", use the PP approximation, which requires O(NM^2)\n"
        "computation, where M is given by the number of active training\n"
        "examples specified by the \"active_set_indices\" option.\n"
        "Default=\"exact\".\n");

    declareOption(
        ol, "active_set_indices", &GaussianProcessRegressor::m_active_set_indices,
        OptionBase::buildoption,
        "If a sparse approximation algorithm is used (e.g. projected process),\n"
        "this specifies the indices of the training-set examples which should be\n"
        "considered to be part of the active set.  Note that these indices must\n"
        "be SORTED IN INCREASING ORDER and should not contain duplicates.\n");

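    // A hedged configuration sketch tying the last two options together
    // (kernel choice and index values are illustrative assumptions):
    //
    //     GaussianProcessRegressor(
    //         kernel = SquaredExponentialARDKernel(),
    //         solution_algorithm = "projected-process",
    //         active_set_indices = [ 0, 5, 12, 40 ]    // sorted, no duplicates
    //     )
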

    //#####  Learnt Options  ##################################################

    declareOption(
        ol, "alpha", &GaussianProcessRegressor::m_alpha,
        OptionBase::learntoption,
        "Matrix of learned parameters, determined from the equation\n"
        "\n"
        "  (K + lambda I)^-1 y\n"
        "\n"
        "(don't forget that y can be a matrix for multivariate output problems)\n"
        "\n"
        "In the case of the projected-process approximation, this contains\n"
        "the result of the equation\n"
        "\n"
        "  (lambda K_mm + K_mn K_nm)^-1 K_mn y\n");

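    // In other words, once 'alpha' is learnt, a prediction is just the dot
    // product f(x) = k(x)' alpha, as carried out in computeOutputAux() below.
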
    declareOption(
        ol, "gram_inverse", &GaussianProcessRegressor::m_gram_inverse,
        OptionBase::learntoption,
        "Inverse of the Gram matrix, used to compute confidence intervals (must\n"
        "be saved since the confidence intervals are obtained from the equation\n"
        "\n"
        "  sigma^2 = k(x,x) - k(x)'(K + lambda I)^-1 k(x)\n"
        "\n"
        "An adjustment similar to 'alpha' is made for the projected-process\n"
        "approximation.\n");

    declareOption(
        ol, "subgram_inverse", &GaussianProcessRegressor::m_subgram_inverse,
        OptionBase::learntoption,
        "Inverse of the sub-Gram matrix, i.e. K_mm^-1.  Used only with the\n"
        "projected-process approximation.\n");

    declareOption(
        ol, "target_mean", &GaussianProcessRegressor::m_target_mean,
        OptionBase::learntoption,
        "Mean of the targets, if the option 'include_bias' is true");

    declareOption(
        ol, "training_inputs", &GaussianProcessRegressor::m_training_inputs,
        OptionBase::learntoption,
        "Saved version of the training set, which must be kept along for\n"
        "carrying out kernel evaluations with the test point.  If using the\n"
        "projected-process approximation, only the inputs in the active set are\n"
        "saved.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void GaussianProcessRegressor::build_()
{
    if (! m_kernel)
        PLERROR("GaussianProcessRegressor::build_: 'kernel' option must be specified");

    if (! m_kernel->is_symmetric)
        PLERROR("GaussianProcessRegressor::build_: the kernel (%s) must be symmetric",
                m_kernel->classname().c_str());

    // If we are reloading the model, set the training inputs into the kernel
    if (m_training_inputs.size() > 0)
        m_kernel->setDataForKernelMatrix(m_training_inputs);

    // If we specified hyperparameters without an optimizer, complain.
    // (It is mildly legal to specify an optimizer without hyperparameters;
    // this does nothing).
    if (m_hyperparameters.size() > 0 && ! m_optimizer)
        PLERROR("GaussianProcessRegressor::build_: 'hyperparameters' are specified "
                "but no 'optimizer'; an optimizer is required in order to carry out "
                "hyperparameter optimization");

    if (m_confidence_epsilon < 0)
        PLERROR("GaussianProcessRegressor::build_: 'confidence_epsilon' must be non-negative");

    // Cache solution algorithm in quick form
    if (m_solution_algorithm == "exact")
        m_algorithm_enum = AlgoExact;
    else if (m_solution_algorithm == "projected-process")
        m_algorithm_enum = AlgoProjectedProcess;
    else
        PLERROR("GaussianProcessRegressor::build_: the option solution_algorithm=='%s' "
                "is not supported.  Value must be in {'exact', 'projected-process'}",
                m_solution_algorithm.c_str());
}

// ### Nothing to add here, simply calls build_
void GaussianProcessRegressor::build()
{
    inherited::build();
    build_();
}


void GaussianProcessRegressor::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(m_kernel,                     copies);
    deepCopyField(m_hyperparameters,            copies);
    deepCopyField(m_optimizer,                  copies);
    deepCopyField(m_active_set_indices,         copies);
    deepCopyField(m_alpha,                      copies);
    deepCopyField(m_gram_inverse,               copies);
    deepCopyField(m_subgram_inverse,            copies);
    deepCopyField(m_target_mean,                copies);
    deepCopyField(m_training_inputs,            copies);
    deepCopyField(m_kernel_evaluations,         copies);
    deepCopyField(m_gram_inverse_product,       copies);
    deepCopyField(m_intervals,                  copies);
    deepCopyField(m_gram_traintest_inputs,      copies);
    deepCopyField(m_gram_inv_traintest_product, copies);
    deepCopyField(m_sigma_reductor,             copies);
}


//#####  setTrainingSet  ######################################################

void GaussianProcessRegressor::setTrainingSet(VMat training_set, bool call_forget)
{
    PLASSERT( training_set );
    int inputsize = training_set->inputsize();
    if (inputsize < 0)
        PLERROR("GaussianProcessRegressor::setTrainingSet: the training set inputsize "
                "must be specified (current value = %d)", inputsize);

    // Convert to a real matrix in order to make saving it saner
    m_training_inputs = training_set.subMatColumns(0, inputsize).toMat();
    inherited::setTrainingSet(training_set, call_forget);
}


//#####  outputsize  ##########################################################

int GaussianProcessRegressor::outputsize() const
{
    return targetsize();
}


//#####  forget  ##############################################################

void GaussianProcessRegressor::forget()
{
    inherited::forget();
    if (m_optimizer)
        m_optimizer->reset();
    m_alpha.resize(0,0);
    m_target_mean.resize(0);
    m_gram_inverse.resize(0,0);
    stage = 0;
}


//#####  train  ###############################################################

void GaussianProcessRegressor::train()
{
    // This generic PLearner method performs a number of standard tasks useful
    // for (almost) any learner, and returns 'false' if no training should
    // take place.  See PLearner.h for more details.
    if (!initTrain())
        return;

    // If we use the projected-process approximation, make sure that the
    // active-set indices are specified and that they are sorted in increasing
    // order
    if (m_algorithm_enum == AlgoProjectedProcess) {
        if (m_active_set_indices.size() == 0)
            PLERROR("GaussianProcessRegressor::train: with the projected-process "
                    "approximation, the active_set_indices option must be specified.");
        int last_index = -1;
        for (int i=0, n=m_active_set_indices.size() ; i<n ; ++i) {
            int cur_index = m_active_set_indices[i];
            if (cur_index <= last_index)
                PLERROR("GaussianProcessRegressor::train: the option active_set_indices "
                        "must be sorted and should not contain duplicates; at index %d, "
                        "encountered value %d whereas previous value was %d.",
                        i, cur_index, last_index);
            last_index = cur_index;
        }
    }

00408     
00409     PLASSERT( m_kernel );
00410     if (! train_set || ! m_training_inputs)
00411         PLERROR("GaussianProcessRegressor::train: the training set must be specified");
00412     int trainlength = train_set->length();
00413     int activelength= ( m_algorithm_enum == AlgoProjectedProcess?
00414                         m_active_set_indices.size() : trainlength );
00415     int inputsize   = train_set->inputsize() ;
00416     int targetsize  = train_set->targetsize();
00417     int weightsize  = train_set->weightsize();
00418     if (inputsize  < 0 || targetsize < 0 || weightsize < 0)
00419         PLERROR("GaussianProcessRegressor::train: inconsistent inputsize/targetsize/weightsize "
00420                 "(%d/%d/%d) in training set", inputsize, targetsize, weightsize);
00421     if (weightsize > 0)
00422         PLERROR("GaussianProcessRegressor::train: observations weights are not currently supported");
00423 
00424     // Subtract the mean if we require it
00425     Mat targets(trainlength, targetsize);
00426     train_set.subMatColumns(inputsize, targetsize)->getMat(0,0,targets);
00427     if (m_include_bias) {
00428         m_target_mean.resize(targets.width());
00429         columnMean(targets, m_target_mean);
00430         targets -= m_target_mean;
00431     }
00432 
00433     // Determine the subset of training inputs and targets to use depending on
00434     // the training algorithm
00435     Mat sub_training_inputs;
00436     Mat sub_training_targets;
00437     if (m_algorithm_enum == AlgoExact) {
00438         sub_training_inputs = m_training_inputs;
00439         sub_training_targets= targets;
00440     }
00441     else if (m_algorithm_enum == AlgoProjectedProcess) {
00442         sub_training_inputs .resize(activelength, inputsize);
00443         sub_training_targets.resize(activelength, targetsize);
00444         selectRows(m_training_inputs, m_active_set_indices, sub_training_inputs);
00445         selectRows(targets,           m_active_set_indices, sub_training_targets);
00446     }

    // Optimize hyperparameters
    VarArray hyperparam_vars;
    PP<GaussianProcessNLLVariable> nll =
        hyperOptimize(sub_training_inputs, sub_training_targets, hyperparam_vars);
    PLASSERT( nll );

    // Compute parameters.  Be careful to also propagate through the
    // hyperparameter variables to ensure the latest values are correctly set
    // into their respective kernels.
    hyperparam_vars.fprop();
    nll->fprop();
    if (m_algorithm_enum == AlgoExact) {
        m_alpha = nll->alpha();
        m_gram_inverse = nll->gramInverse();
    }
    else if (m_algorithm_enum == AlgoProjectedProcess) {
        trainProjectedProcess(m_training_inputs, sub_training_inputs, targets);

        // Full training set no longer required from now on
        m_training_inputs = sub_training_inputs;
        m_kernel->setDataForKernelMatrix(m_training_inputs);
    }

    if (getTrainStatsCollector()) {
        // Compute train statistics by running a test over the training set.
        // This works uniformly for all solution algorithms, albeit with some
        // performance hit.
        PP<VecStatsCollector> test_stats = new VecStatsCollector;
        test(getTrainingSet(), test_stats);

        // And accumulate some statistics.  Note: the NLL corresponds to the
        // subset-of-data version if the projected-process approximation is
        // used.  It is the exact NLL if the exact algorithm is used.
        Vec costs(3);
        costs.subVec(0,2) << test_stats->getMean();
        costs[2] = nll->value[0];
        getTrainStatsCollector()->update(costs);
    }
    MODULE_LOG << "Train marginal NLL (subset-of-data): " << nll->value[0] << endl;
}
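
// A hedged usage sketch (not part of the original file): how a caller might
// drive train() above and computeOutput() below.  The setOption() calls and
// the GaussianKernel specification string are assumptions about the
// surrounding PLearn API; the other methods appear in this file:
//
//     PP<GaussianProcessRegressor> gp = new GaussianProcessRegressor();
//     gp->setOption("kernel", "GaussianKernel(sigma = 1)");
//     gp->setOption("weight_decay", "1e-2");
//     gp->build();
//     gp->setTrainingSet(training_vmat);  // VMat with inputsize/targetsize set
//     gp->train();
//     Vec input(gp->inputsize()), output(gp->outputsize());
//     gp->computeOutput(input, output);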


//#####  computeOutput  #######################################################

void GaussianProcessRegressor::computeOutput(const Vec& input, Vec& output) const
{
    PLASSERT( m_kernel && m_alpha.isNotNull() && m_training_inputs.size() > 0 );
    PLASSERT( m_alpha.width()  == output.size() );
    PLASSERT( m_alpha.length() == m_training_inputs.length() );
    PLASSERT( input.size()     == m_training_inputs.width()  );

    m_kernel_evaluations.resize(m_alpha.length());
    computeOutputAux(input, output, m_kernel_evaluations);
}


void GaussianProcessRegressor::computeOutputAux(
    const Vec& input, Vec& output, Vec& kernel_evaluations) const
{
    if (input.hasMissing()) {
        output.fill(MISSING_VALUE);
        kernel_evaluations.fill(MISSING_VALUE);
        return;
    }

    m_kernel->evaluate_all_i_x(input, kernel_evaluations);

    // Finally compute k(x,x_i) * (K + \lambda I)^-1 y.
    // This expression does not change depending on whether we are using
    // the exact algorithm or the projected-process approximation.
    product(Mat(1, output.size(), output),
            Mat(1, kernel_evaluations.size(), kernel_evaluations),
            m_alpha);

    if (m_include_bias)
        output += m_target_mean;
}


//#####  computeCostsFromOutputs  #############################################

void GaussianProcessRegressor::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                       const Vec& target, Vec& costs) const
{
    costs.resize(2);

    // NLL cost is the NLL of the target under the predictive distribution
    // (centered at the predictive mean, with variance obtainable from the
    // confidence bounds).  HOWEVER, to obtain it, we have to be able to
    // compute the confidence bounds.  If impossible, simply set missing-value
    // for the NLL cost.
    if (m_compute_confidence) {
        static const float PROBABILITY = pl_erf(1. / (2*sqrt(2.0)));  // 0.5 stddev
        // The call below must run unconditionally (not only under BOUNDCHECK)
        // since it fills m_intervals, which is used in the loop that follows.
        bool confavail = computeConfidenceFromOutput(input, output, PROBABILITY,
                                                     m_intervals);
        PLASSERT( confavail && m_intervals.size() == output.size() &&
                  output.size() == target.size() );
        static const real LN_2PI_OVER_2 = pl_log(2*M_PI) / 2.0;
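        // Per output dimension, the Gaussian NLL is
        //     (t - mu)^2 / (2 sigma^2) + log(sigma) + log(2 pi)/2,
        // where sigma is recovered below as the width of the +/-0.5-stddev
        // interval computed above (that width equals one standard deviation).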
        real nll = 0;
        for (int i=0, n=output.size() ; i<n ; ++i) {
            real sigma = m_intervals[i].second - m_intervals[i].first;
            sigma = max(sigma, real(1e-15));        // Very minor regularization
            real diff = target[i] - output[i];
            nll += diff*diff / (2.*sigma*sigma) + pl_log(sigma) + LN_2PI_OVER_2;
        }
        costs[0] = nll;
    }
    else
        costs[0] = MISSING_VALUE;

    real squared_loss = 0.5*powdistance(output,target);
    costs[1] = squared_loss;
}


//#####  computeConfidenceFromOutput  #########################################

bool GaussianProcessRegressor::computeConfidenceFromOutput(
    const Vec& input, const Vec& output, real probability,
    TVec< pair<real,real> >& intervals) const
{
    if (! m_compute_confidence) {
        PLWARNING("GaussianProcessRegressor::computeConfidenceFromOutput: the option\n"
                  "'compute_confidence' must be true in order to compute valid\n"
                  "confidence intervals");
        return false;
    }

    // BIG assumption: assume that computeOutput has just been called and that
    // m_kernel_evaluations contains the right stuff.
    PLASSERT( m_kernel && m_gram_inverse.isNotNull() );
    real base_sigma_sq = m_kernel(input, input);
    m_gram_inverse_product.resize(m_kernel_evaluations.size());

    real sigma;
    if (m_algorithm_enum == AlgoExact) {
        product(m_gram_inverse_product, m_gram_inverse, m_kernel_evaluations);
        real sigma_reductor = dot(m_gram_inverse_product, m_kernel_evaluations);
        sigma = sqrt(max(real(0.),
                         base_sigma_sq - sigma_reductor + m_confidence_epsilon));
    }
    else if (m_algorithm_enum == AlgoProjectedProcess) {
        // From R&W eq. (8.27).
        product(m_gram_inverse_product, m_subgram_inverse, m_kernel_evaluations);
        productScaleAcc(m_gram_inverse_product, m_gram_inverse, m_kernel_evaluations,
                        -1.0, 1.0);
        real sigma_reductor = dot(m_gram_inverse_product, m_kernel_evaluations);
        sigma = sqrt(max(real(0.),
                         base_sigma_sq - sigma_reductor + m_confidence_epsilon));
    }

    // two-tailed
    const real multiplier = gauss_01_quantile((1+probability)/2);
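    // E.g. probability = 0.95 gives multiplier = gauss_01_quantile(0.975),
    // approximately 1.96: the familiar two-sided 95% normal multiplier.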
    real half_width = multiplier * sigma;
    intervals.resize(output.size());
    for (int i=0, n=output.size() ; i<n ; ++i)
        intervals[i] = std::make_pair(output[i] - half_width,
                                      output[i] + half_width);
    return true;
}


//#####  computeOutputCovMat  #################################################

void GaussianProcessRegressor::computeOutputCovMat(
    const Mat& inputs, Mat& outputs, TVec<Mat>& covariance_matrices) const
{
    PLASSERT( m_kernel && m_alpha.isNotNull() && m_training_inputs.size() > 0 );
    PLASSERT( m_alpha.width()  == outputsize() );
    PLASSERT( m_alpha.length() == m_training_inputs.length() );
    PLASSERT( inputs.width()   == m_training_inputs.width()  );
    PLASSERT( inputs.width()   == inputsize() );
    const int N = inputs.length();
    const int M = outputsize();
    const int T = m_training_inputs.length();
    outputs.resize(N, M);
    covariance_matrices.resize(M);

    // Preallocate space for the covariance matrix, and since all outputs share
    // the same matrix, copy it into the remaining elements of
    // covariance_matrices
    Mat& covmat = covariance_matrices[0];
    covmat.resize(N, N);
    for (int j=1 ; j<M ; ++j)
        covariance_matrices[j] = covmat;

    // Start by computing the matrix of kernel evaluations between the train
    // and test outputs, and compute the output
    m_gram_traintest_inputs.resize(N, T);
    bool has_missings = false;
    for (int i=0 ; i<N ; ++i) {
        Vec cur_traintest_kereval = m_gram_traintest_inputs(i);
        Vec cur_output = outputs(i);
        computeOutputAux(inputs(i), cur_output, cur_traintest_kereval);
        has_missings = has_missings || inputs(i).hasMissing();
    }

    // If any missings found in the inputs, don't bother with computing a
    // covariance matrix
    if (has_missings) {
        covmat.fill(MISSING_VALUE);
        return;
    }

    // Next compute the kernel evaluations between the test inputs; more or
    // less lifted from Kernel.cc ==> must see with Olivier how to better
    // factor this code
    Mat& K = covmat;

    PLASSERT( K.width() == N && K.length() == N );
    const int mod = K.mod();
    real Kij;
    real* Ki;
    real* Kji;
    for (int i=0 ; i<N ; ++i) {
        Ki  = K[i];
        Kji = &K[0][i];
        const Vec& cur_input_i = inputs(i);
        for (int j=0 ; j<=i ; ++j, Kji += mod) {
            Kij = m_kernel->evaluate(cur_input_i, inputs(j));
            *Ki++ = Kij;
            if (j<i)
                *Kji = Kij;    // Assume symmetry, checked at build
        }
    }

    // The predictive covariance matrix for the exact case is (cf. Rasmussen
    // and Williams):
    //
    //    cov(f*) = K(X*,X*) - K(X*,X) [K(X,X) + lambda I]^-1 K(X,X*)
    //
    // where X are the training inputs, and X* are the test inputs.
    //
    // For the projected process case, it is:
    //
    //    cov(f*) = K(X*,X*) - K(X*,X_m) K_mm^-1 K(X*,X_m)
    //               + sigma^2 K(X*,X_m) (sigma^2 K_mm + K_mn K_nm)^-1 K(X*,X_m)
    //
    // Note that all sigma^2's have been absorbed into their respective
    // cached terms, and in particular in this context sigma^2 is emphatically
    // not equal to the weight decay.
    m_gram_inv_traintest_product.resize(T,N);
    m_sigma_reductor.resize(N,N);

    if (m_algorithm_enum == AlgoExact) {
        productTranspose(m_gram_inv_traintest_product, m_gram_inverse,
                         m_gram_traintest_inputs);
        product(m_sigma_reductor, m_gram_traintest_inputs,
                m_gram_inv_traintest_product);
    }
    else if (m_algorithm_enum == AlgoProjectedProcess) {
        productTranspose(m_gram_inv_traintest_product, m_subgram_inverse,
                         m_gram_traintest_inputs);
        productTransposeScaleAcc(m_gram_inv_traintest_product, m_gram_inverse,
                                 m_gram_traintest_inputs, -1.0, 1.0);
        product(m_sigma_reductor, m_gram_traintest_inputs,
                m_gram_inv_traintest_product);
    }

    covmat -= m_sigma_reductor;

    // As a preventive measure, never output negative variance, even though
    // this does not guarantee the non-negative-definiteness of the matrix
    for (int i=0 ; i<N ; ++i)
        covmat(i,i) = max(real(0.0), covmat(i,i) + m_confidence_epsilon);
}


//#####  get*CostNames  #######################################################

TVec<string> GaussianProcessRegressor::getTestCostNames() const
{
    TVec<string> c(2);
    c[0] = "nll";
    c[1] = "mse";
    return c;
}


TVec<string> GaussianProcessRegressor::getTrainCostNames() const
{
    TVec<string> c(3);
    c[0] = "nll";
    c[1] = "mse";
    c[2] = "marginal-nll";
    return c;
}


//#####  hyperOptimize  #######################################################

PP<GaussianProcessNLLVariable>
GaussianProcessRegressor::hyperOptimize(const Mat& inputs, const Mat& targets,
                                        VarArray& hyperparam_vars)
{
    // If there are no hyperparameters or optimizer, just create a simple
    // variable and return it right away.
    if (! m_optimizer || (m_hyperparameters.size() == 0 &&
                          m_ARD_hyperprefix_initval.first.empty()) )
    {
        return new GaussianProcessNLLVariable(
            m_kernel, m_weight_decay, inputs, targets,
            TVec<string>(), VarArray(), m_compute_confidence,
            m_save_gram_matrix, getExperimentDirectory());
    }

    // Otherwise create Vars that wrap each hyperparameter
    const int numhyper  = m_hyperparameters.size();
    const int numinputs = ( ! m_ARD_hyperprefix_initval.first.empty() ?
                            inputsize() : 0 );
    hyperparam_vars = VarArray(numhyper + numinputs);
    TVec<string> hyperparam_names(numhyper + numinputs);
    int i;
    for (i=0 ; i<numhyper ; ++i) {
        hyperparam_names[i] = m_hyperparameters[i].first;
        hyperparam_vars [i] = new ObjectOptionVariable(
            (Kernel*)m_kernel, m_hyperparameters[i].first, m_hyperparameters[i].second);
        hyperparam_vars[i]->setName(m_hyperparameters[i].first);
    }

    // If specified, create the Vars for automatic relevance determination
    string& ARD_name = m_ARD_hyperprefix_initval.first;
    string& ARD_init = m_ARD_hyperprefix_initval.second;
    if (! ARD_name.empty()) {
        // Small hack to ensure the ARD vector in the kernel has proper size
        Vec init(numinputs, lexical_cast<double>(ARD_init));
        m_kernel->changeOption(ARD_name, tostring(init, PStream::plearn_ascii));

        for (int j=0 ; j<numinputs ; ++j, ++i) {
            hyperparam_names[i] = ARD_name + '[' + tostring(j) + ']';
            hyperparam_vars [i] = new ObjectOptionVariable(
                (Kernel*)m_kernel, hyperparam_names[i], ARD_init);
            hyperparam_vars [i]->setName(hyperparam_names[i]);
        }
    }

    // Create the cost-function variable
    PP<GaussianProcessNLLVariable> nll = new GaussianProcessNLLVariable(
        m_kernel, m_weight_decay, inputs, targets, hyperparam_names,
        hyperparam_vars, true, m_save_gram_matrix, getExperimentDirectory());
    nll->setName("GaussianProcessNLLVariable");

    // Some logging about the initial values
    GaussianProcessNLLVariable::logVarray(hyperparam_vars,
                                          "Hyperparameter initial values:");

    // And optimize for nstages
    m_optimizer->setToOptimize(hyperparam_vars, (Variable*)nll);
    m_optimizer->build();
    PP<ProgressBar> pb(
        report_progress? new ProgressBar("Training GaussianProcessRegressor "
                                         "from stage " + tostring(stage) + " to stage " +
                                         tostring(nstages), nstages-stage)
        : 0);
    bool early_stopping = false;
    PP<VecStatsCollector> statscol = new VecStatsCollector;
    for (const int initial_stage = stage ; !early_stopping && stage < nstages
             ; ++stage)
    {
        if (pb)
            pb->update(stage - initial_stage);

        statscol->forget();
        early_stopping = m_optimizer->optimizeN(*statscol);
        statscol->finalize();
    }
    pb = 0;                                  // Finish progress bar right now

    // Some logging about the final values
    GaussianProcessNLLVariable::logVarray(hyperparam_vars,
                                          "Hyperparameter final values:");
    return nll;
}


//#####  trainProjectedProcess (LAPACK)  ######################################

void GaussianProcessRegressor::trainProjectedProcess(
    const Mat& all_training_inputs, const Mat& sub_training_inputs,
    const Mat& all_training_targets)
{
    PLASSERT( m_kernel );
    const int activelength= m_active_set_indices.length();
    const int trainlength = all_training_inputs.length();
    const int targetsize  = all_training_targets.width();

    // The RHS matrix (when solving the linear system Gram*Params=RHS) is made
    // up of two parts: the regression targets themselves, and the identity
    // matrix if we requested them (for confidence intervals).  After solving
    // the linear system, set the gram-inverse appropriately.  To interface
    // nicely with LAPACK, we store this in a transposed format.
    int rhs_width = targetsize + (m_compute_confidence? activelength : 0);
    Mat tmp_rhs(rhs_width, activelength);
    if (m_compute_confidence) {
        Mat rhs_identity = tmp_rhs.subMatRows(targetsize, activelength);
        identityMatrix(rhs_identity);
    }

    // We always need to solve K_mm^-1.  Prepare the RHS with the identity
    // matrix to be ready to solve with a Cholesky decomposition.
    m_subgram_inverse.resize(activelength, activelength);
    Mat gram_cholesky(activelength, activelength);
    identityMatrix(m_subgram_inverse);

    // Compute Gram Matrix and add weight decay to diagonal.  This is done in a
    // few steps: (1) K_mm (using the active-set only), (2) then separately
    // compute K_mn (active-set by all examples), (3) computing the covariance
    // matrix of K_mn to give an m x m matrix, (4) and finally add them up.
    // cf. R&W p. 179, eq. 8.26 :: (sigma_n^2 K_mm + K_mn K_nm)
    m_kernel->setDataForKernelMatrix(all_training_inputs);
    Mat gram(activelength, activelength);
    Mat asym_gram(activelength, trainlength);
    Vec self_cov(activelength);
    m_kernel->computeTestGramMatrix(sub_training_inputs, asym_gram, self_cov);
    // Note: asym_gram contains K_mn without any sampling noise.

    // DBG_MODULE_LOG << "Asym_gram =\n" << asym_gram << endl;

    // Obtain K_mm, also without self-noise.  Add some jitter as per
    // the Rasmussen & Williams code
    selectColumns(asym_gram, m_active_set_indices, gram);
    real jitter = m_weight_decay * trace(gram);
    addToDiagonal(gram, jitter);

    // DBG_MODULE_LOG << "Kmm =\n" << gram << endl;

    // Obtain an estimate of the EFFECTIVE sampling noise from the
    // difference between self_cov and the diagonal of gram
    Vec sigma_sq = self_cov - diag(gram);
    for (int i=0, n=sigma_sq.size() ; i<n ; ++i) // ensure it does not go negative
        sigma_sq[i] = max(m_weight_decay, sigma_sq[i]);
    double sigma_sq_est = mean(sigma_sq);
    // DBG_MODULE_LOG << "Sigma^2 estimate = " << sigma_sq_est << endl;

    // Before clobbering K_mm, compute its inverse.
    gram_cholesky << gram;
    lapackCholeskyDecompositionInPlace(gram_cholesky);
    lapackCholeskySolveInPlace(gram_cholesky, m_subgram_inverse,
                               true /* column-major */);

    gram *= sigma_sq_est;                            // sigma_n^2 K_mm
    productTransposeAcc(gram, asym_gram, asym_gram); // Inner part of eq. 8.26

    // DBG_MODULE_LOG << "Gram =\n" << gram << endl;

    // Dump a fragment of the Gram Matrix to the debug log
    DBG_MODULE_LOG << "Projected-process Gram fragment: "
                   << gram(0,0) << ' '
                   << gram(1,0) << ' '
                   << gram(1,1) << endl;

    // The RHS should contain (K_mn*y)' = y'*K_mn'.  Compute it.
    Mat targets_submat = tmp_rhs.subMatRows(0, targetsize);
    transposeTransposeProduct(targets_submat, all_training_targets, asym_gram);
    // DBG_MODULE_LOG << "Projected RHS =\n" << targets_submat << endl;

    // Compute the Cholesky decomposition and solve the linear system.  LAPACK
    // solves in-place, but luckily we need neither the Gram nor the RHS
    // matrix after solving.
    lapackCholeskyDecompositionInPlace(gram);
    lapackCholeskySolveInPlace(gram, tmp_rhs, true /* column-major */);

    // Transpose final result.  LAPACK solved in-place for tmp_rhs.
    m_alpha.resize(tmp_rhs.width(), tmp_rhs.length());
    transpose(tmp_rhs, m_alpha);
    if (m_compute_confidence) {
        m_gram_inverse = m_alpha.subMatColumns(targetsize, activelength);
        m_alpha        = m_alpha.subMatColumns(0, targetsize);

        // Absorb sigma^2 into gram_inverse as per eq. 8.27 of R&W
        m_gram_inverse *= sigma_sq_est;
    }
}



} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :