PLearn 0.1
distributions/DEPRECATED/GaussianProcessRegressor.cc
// -*- C++ -*-

// GaussianProcessRegressor.cc
//
// Copyright (C) 2003 Yoshua Bengio
// 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org




/* *******************************************************
 * $Id: GaussianProcessRegressor.cc 4717 2005-12-16 15:37:01Z tihocan $
 ******************************************************* */

#include "GaussianProcessRegressor.h"
#include <plearn/math/pl_erf.h>
#include <plearn/math/plapack.h>

namespace PLearn {
using namespace std;

GaussianProcessRegressor::GaussianProcessRegressor() : 
    inherited(), Gram_matrix_normalization("none"), 
    max_nb_evectors(-1)
{}

PLEARN_IMPLEMENT_OBJECT(GaussianProcessRegressor, "Basic version of Gaussian Process regression.", "NO HELP");

void GaussianProcessRegressor::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields 
    // ### that you wish to be deepCopied rather than 
    // ### shallow-copied.
    // ### ex:
    deepCopyField(kernel, copies);
    deepCopyField(noise_sd, copies);
    deepCopyField(alpha, copies);
    deepCopyField(Kxxi, copies);
    deepCopyField(Kxx, copies);
    deepCopyField(K, copies);
    deepCopyField(eigenvectors, copies);
    deepCopyField(eigenvalues, copies);
    deepCopyField(meanK, copies);
}

void GaussianProcessRegressor::setInput(const Vec& input) const
{
    // compute K(x,x_i)
    for (int i=0;i<Kxxi.length();i++)
        Kxxi[i]=kernel->evaluate_x_i(input,i);
    // compute K(x,x)
    Kxx = kernel->evaluate(input,input);
    // apply normalization
    if (Gram_matrix_normalization=="centering_a_dotproduct")
    {
        real kmean = mean(Kxxi);
        for (int i=0;i<Kxxi.length();i++)
            Kxxi[i] = Kxxi[i] - kmean - meanK[i] + mean_allK;
        Kxx = Kxx - kmean - kmean + mean_allK;
    } else if (Gram_matrix_normalization=="centering_a_distance")
    {
        real kmean = mean(Kxxi);
        for (int i=0;i<Kxxi.length();i++)
            Kxxi[i] = -0.5*(Kxxi[i] - kmean - meanK[i] + mean_allK);
        Kxx = -0.5*(Kxx - kmean - kmean + mean_allK);
    }
    else if (Gram_matrix_normalization=="divisive")
    {
        real kmean = mean(Kxxi);
        for (int i=0;i<Kxxi.length();i++)
            Kxxi[i] = Kxxi[i]/sqrt(kmean* meanK[i]);
        Kxx = Kxx/kmean;
    }
}


void GaussianProcessRegressor::declareOptions(OptionList& ol)
{
    declareOption(ol, "kernel", &GaussianProcessRegressor::kernel, OptionBase::buildoption, 
                  "The kernel (seen as a symmetric, two-argument function of a pair of input points)\n"
                  "that corresponds to the prior covariance on the function to be learned.\n");

    declareOption(ol, "noise_sd", &GaussianProcessRegressor::noise_sd, OptionBase::buildoption,
                  "Output noise std. dev. (one element per output).\n");


    declareOption(ol, "max_nb_evectors", &GaussianProcessRegressor::max_nb_evectors, OptionBase::buildoption,
                  "Maximum number of eigenvectors of the Gram matrix to compute (or -1 if all should be computed).\n");


    declareOption(ol, "Gram_matrix_normalization", &GaussianProcessRegressor::Gram_matrix_normalization, 
                  OptionBase::buildoption,
                  "Normalization method to apply to the Gram matrix. Expected values are:\n"
                  "\"none\": no normalization\n"
                  "\"centering_a_dotproduct\": this is the kernel PCA centering\n"
                  "     K_{ij} <-- K_{ij} - mean_i(K_ij) - mean_j(K_ij) + mean_{ij}(K_ij)\n"
                  "\"centering_a_distance\": this is the MDS transformation of squared distances to dot products\n"
                  "     K_{ij} <-- -0.5(K_{ij} - mean_i(K_ij) - mean_j(K_ij) + mean_{ij}(K_ij))\n"
                  "\"divisive\": this is the spectral clustering and Laplacian eigenmaps normalization\n"
                  "     K_{ij} <-- K_{ij}/sqrt(mean_i(K_ij) mean_j(K_ij))\n");


    inherited::declareOptions(ol);
}
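
// Illustrative sketch (not part of the original class): how the
// "centering_a_dotproduct" and "divisive" normalizations documented above act
// on a small symmetric Gram matrix, using only the standard library.  The
// helper names center_gram and divisive_gram are hypothetical and do not
// exist in PLearn; the block is disabled so it has no effect on compilation
// (the includes would sit at file scope if the sketch were extracted).
#if 0
#include <vector>
#include <cmath>

// K_{ij} <-- K_{ij} - mean_i(K_ij) - mean_j(K_ij) + mean_{ij}(K_ij)
static void center_gram(std::vector<std::vector<double> >& K)
{
    int n = int(K.size());
    std::vector<double> row_mean(n, 0.0);
    double all_mean = 0.0;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
            row_mean[i] += K[i][j] / n;   // equals mean_i(K_ij) since K is symmetric
        all_mean += row_mean[i] / n;
    }
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            K[i][j] = K[i][j] - row_mean[i] - row_mean[j] + all_mean;
}

// K_{ij} <-- K_{ij} / sqrt(mean_i(K_ij) mean_j(K_ij))
static void divisive_gram(std::vector<std::vector<double> >& K)
{
    int n = int(K.size());
    std::vector<double> row_mean(n, 0.0);
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            row_mean[i] += K[i][j] / n;
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            K[i][j] /= std::sqrt(row_mean[i] * row_mean[j]);
}
#endif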

void GaussianProcessRegressor::build_()
{
    if(expdir!="")
    {
        if(!force_mkdir(expdir))
            PLERROR("In GaussianProcessRegressor Could not create experiment directory %s",expdir.absolute().c_str());
        expdir = expdir.absolute() / "";
        // expdir = abspath(expdir);
    }

    if (train_set)
    {
        K.resize(train_set->length(),train_set->length());
        Kxxi.resize(train_set->length());
        alpha.resize(outputsize(),train_set->length());
        meanK.resize(train_set->length());
        n_outputs = train_set->targetsize();
    }
}

int GaussianProcessRegressor::outputsize() const
{ 
    int output_size=0;
    if (outputs_def.find("e") != string::npos)
        output_size+=n_outputs;
    if (outputs_def.find("v") != string::npos)
        // we only compute a diagonal output variance
        output_size+=n_outputs;
    return output_size;
}

void GaussianProcessRegressor::build()
{
    inherited::build();
    build_();
}

void GaussianProcessRegressor::forget()
{
    stage = 0;
}

GaussianProcessRegressor::~GaussianProcessRegressor()
{
}

TVec<string> GaussianProcessRegressor::getTrainCostNames() const
{
    TVec<string> names(2);
    // order must match computeCostsFromOutputs, which fills in mse first
    names[0]="mse";
    names[1]="log-likelihood";
    return names;
}

TVec<string> GaussianProcessRegressor::getTestCostNames() const
{ return getTrainCostNames(); }

int GaussianProcessRegressor::getTestCostIndex(const string& costname) const
{
    TVec<string> costnames = getTestCostNames();
    for(int i=0; i<costnames.length(); i++)
        if(costnames[i]==costname)
            return i;
    return -1;
}

int GaussianProcessRegressor::getTrainCostIndex(const string& costname) const
{
    TVec<string> costnames = getTrainCostNames();
    for(int i=0; i<costnames.length(); i++)
        if(costnames[i]==costname)
            return i;
    return -1;
}

double GaussianProcessRegressor::log_density(const Vec& y) const
{
    PLERROR("GaussianProcessRegressor::log_density not implemented yet");
    return 0;
}

void GaussianProcessRegressor::expectation(Vec expected_y) const
{
    for (int i=0;i<n_outputs;i++)
        expected_y[i] = dot(Kxxi,alpha(i));
}

Vec GaussianProcessRegressor::expectation() const
{
    static Vec expected_target;
    expected_target.resize(n_outputs);
    expectation(expected_target);
    return expected_target;
}

void GaussianProcessRegressor::variance(Vec diag_variances) const
{
    for (int i=0;i<n_outputs;i++)
    {
        real v = Kxx;
        v -= QFormInverse(noise_sd[i]*noise_sd[i],Kxxi);
        diag_variances[i] = v;
    }
}

Mat GaussianProcessRegressor::variance() const
{
    static Mat var;
    if (var.length()!=n_outputs)
    {
        var.resize(n_outputs,n_outputs);
        var.clear();
    }
    for (int i=0;i<n_outputs;i++)
    {
        real v = Kxx;
        v -= QFormInverse(noise_sd[i]*noise_sd[i],Kxxi);
        var(i,i) = v;
    }
    return var;
}
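
// Illustrative worked example (hypothetical numbers, not taken from the
// original code): with a single training point, Gram matrix K = [1], noise
// variance sigma^2 = 0.25, K(x,x) = 1 and K(x,x_1) = 0.5, the diagonal
// predictive variance computed above is
//     Kxx - Kxxi' (K + sigma^2 I)^{-1} Kxxi = 1 - 0.5^2 / (1 + 0.25) = 0.8,
// i.e. QFormInverse(sigma2, Kxxi) is the quadratic form subtracted from Kxx.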

void GaussianProcessRegressor::computeOutput(const Vec& input, Vec& output) const
{
    setInput_const(input);
    int i0=0;
    if (outputs_def.find("e") != string::npos)
    {
        expectation(output.subVec(i0,n_outputs));
        i0+=n_outputs;
    }
    if (outputs_def.find("v") != string::npos)
    {
        variance(output.subVec(i0,n_outputs));
        i0+=n_outputs;
    }
}

// prediction = E[E[y|x]|training_set] = E[y|x,training_set]
// prediction[j] = sum_i alpha_{ji} K(x,x_i)
//               = (K(x,x_i))_i' inv(K+sigma^2[j] I) targets
//
// Var[y[j]|x,training_set] = Var[E[y[j]|x]|training_set] + E[Var[y[j]|x]|training_set]
//  where
//  Var[E[y[j]|x]|training_set] = K(x,x)- (K(x,x_i))_i' inv(K+sigma^2[j]) (K(x,x_i))_i
//  and
//  E[Var[y[j]|x]|training_set] = Var[y[j]|x] = sigma^2[j] = noise
//
// costs:
//   MSE = sum_j (y[j] - prediction[j])^2
//   log-likelihood = sum_j log Normal(y[j];prediction[j],Var[y[j]|x,training_set])
//
void GaussianProcessRegressor::computeCostsFromOutputs(const Vec& input, const Vec& output, 
                                                       const Vec& target, Vec& costs) const
{
    Vec mu;
    static Vec var;
    int i0=0;
    if (outputs_def.find("e")!=string::npos)
    {
        mu = output.subVec(i0,n_outputs);
        i0+=n_outputs;
    }
    else
        mu = expectation();
    if (outputs_def.find("v")!=string::npos)
    {
        var = output.subVec(i0,n_outputs);
        i0+=n_outputs;
    }
    else
    {
        var.resize(n_outputs);
        variance(var);
    }
    real mse = 0;
    real logdensity = 0;
    for (int i=0;i<n_outputs;i++)
    {
        real diff=mu[i] - target[i];
        mse += diff*diff;
        logdensity += gauss_log_density_var(target[i],mu[i],var[i]+noise_sd[i]*noise_sd[i]);
    }
    costs[0]=mse;
    costs[1]=logdensity;
}
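
// Minimal standalone sketch of the per-output log-density term accumulated
// above, under the assumption that gauss_log_density_var(y,mu,var) returns
// log N(y; mu, var) for a univariate Gaussian with variance var.  The helper
// name toy_gauss_log_density is hypothetical; the block is disabled and the
// include would sit at file scope if the sketch were extracted.
#if 0
#include <cmath>

static double toy_gauss_log_density(double y, double mu, double var)
{
    static const double LOG_2PI = 1.8378770664093453; // log(2*pi)
    double diff = y - mu;
    return -0.5 * (LOG_2PI + std::log(var) + diff * diff / var);
}
// e.g. toy_gauss_log_density(0.3, 0.0, 1.0) ~= -0.9639, i.e. log(0.3814...),
// the standard normal density evaluated at 0.3.
#endif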

void GaussianProcessRegressor::computeOutputAndCosts(const Vec& input, const Vec& target, 
                                                     Vec& output, Vec& costs) const
{
    computeOutput(input, output);
    computeCostsFromOutputs(input, output, target, costs);
}

void GaussianProcessRegressor::computeCostsOnly(const Vec& input, const Vec& target,  
                                                Vec& costs) const
{
    static Vec tmp_output;
    tmp_output.resize(outputsize());
    computeOutputAndCosts(input, target, tmp_output, costs);
}

void GaussianProcessRegressor::train()
{
    // compute Gram matrix K
    int l=K.length();
    VMat input_rows = train_set.subMatColumns(0,inputsize());
    VMat target_rows = train_set.subMatColumns(inputsize(),targetsize());
    kernel->setDataForKernelMatrix(input_rows);
    kernel->computeGramMatrix(K);

    // SHOULD WE ADD THE NOISE VARIANCE BEFORE NORMALIZATION?

    // optionally "normalize" the gram matrix
    if (Gram_matrix_normalization=="centering_a_dotproduct")
    {
        columnMean(K,meanK);
        mean_allK = mean(meanK);
        int m=K.mod();
        for (int i=0;i<l;i++)
        {
            real* Ki = K[i];
            real* Kji_ = &K[0][i];
            for (int j=0;j<=i;j++,Kji_+=m)
            {
                real Kij = Ki[j] - meanK[i] - meanK[j] + mean_allK;
                Ki[j]=Kij;
                if (j<i)
                    *Kji_ =Kij;
            }
        }
    }
    else if (Gram_matrix_normalization=="centering_a_distance")
    {
        columnMean(K,meanK);
        mean_allK = mean(meanK);
        int m=K.mod();
        for (int i=0;i<l;i++)
        {
            real* Ki = K[i];
            real* Kji_ = &K[0][i];
            for (int j=0;j<=i;j++,Kji_+=m)
            {
                real Kij = -0.5*(Ki[j] - meanK[i] - meanK[j] + mean_allK);
                Ki[j]=Kij;
                if (j<i)
                    *Kji_ =Kij;
            }
        }
    }
    else if (Gram_matrix_normalization=="divisive")
    {
        columnMean(K,meanK);
        int m=K.mod();
        for (int i=0;i<l;i++)
        {
            real* Ki = K[i];
            real* Kji_ = &K[0][i];
            for (int j=0;j<=i;j++,Kji_+=m)
            {
                real Kij = Ki[j] / sqrt(meanK[i]*meanK[j]);
                Ki[j]=Kij;
                if (j<i)
                    *Kji_ =Kij;
            }
        }
    }
    // compute principal eigenvectors
    int n_components = max_nb_evectors<0 || max_nb_evectors>l ? l : max_nb_evectors;
    eigenVecOfSymmMat(K,n_components,eigenvalues,eigenvectors);
    // pre-compute alpha[i]=(K+noise_sd[i]^2 I)^{-1}*targets  for regression
    for (int i=0;i<n_outputs;i++)
    {
        VMat target_column = target_rows.subMatColumns(i,1);
        inverseCovTimesVec(noise_sd[i]*noise_sd[i],target_column.toMat().toVec(),alpha(i));
    }

}

real GaussianProcessRegressor::BayesianCost()
{
    int l=K.length();
    int m=eigenvectors.length();
    real nll = l*n_outputs*Log2Pi;
    for (int i=0;i<n_outputs;i++)
    {
        real sigma2_i=noise_sd[i]*noise_sd[i];
        //nll += QFormInverse(sigma2_i,targets); // y'*inv(C)*y 
        // add the log det(K+sigma_i^2 I) contribution
        if (m<l)
            // the last l-m eigenvalues are sigma_i^2
            nll += (l-m)*safeflog(sigma2_i); 
        // while the first m ones are lambda_i + sigma_i^2
        for (int j=0;j<m;j++)
            nll += safeflog(eigenvalues[j]+sigma2_i);
    }
    nll *= 0.5;
    return nll;
}

// multiply (K+sigma^2 I)^{-1} by vector u, put result in Cinv_u
// TRICK USING PRINCIPAL E-VECTORS OF K:
//   Let C = sum_{i=1}^m lambda_i v_i v_i' + sigma^2 I
//   with v_i orthonormal eigenvectors. Then, it can also be written
//       C = sum_{i=1}^m (lambda_i +sigma^2) v_i v_i' + sum_{i=m+1}^n sigma^2 v_i v_i'
//   whose inverse is simply
//       inverse(C) = sum_{i=1}^m 1/(lambda_i +sigma^2) v_i v_i' + sum_{i=m+1}^n 1/sigma^2 v_i v_i'
//                  = sum_{i=1}^m (1/(lambda_i +sigma^2) - 1/sigma^2) v_i v_i' + 1/sigma^2 I
//   so 
//    inverse(C) * u = u/sigma^2 + sum_{i=1}^m (1/(lambda_i+sigma^2) - 1/sigma^2) v_i v_i.u
void GaussianProcessRegressor::inverseCovTimesVec(real sigma2, Vec u, Vec Cinv_u) const
{
    int m=eigenvectors.length();
    real one_over_sigma2 = 1.0/sigma2;
    multiply(u,one_over_sigma2,Cinv_u);
    for (int i=0;i<m;i++)
    {
        Vec v_i = eigenvectors(i);
        real proj = dot(v_i,u);
        multiplyAdd(Cinv_u, v_i, proj*(1.0/(eigenvalues[i]+sigma2)-one_over_sigma2), Cinv_u);
    }
}
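
// Illustrative sketch (not part of the class): a numeric check of the
// identity used above, inverse(C) u = u/sigma^2
// + sum_i (1/(lambda_i+sigma^2) - 1/sigma^2) v_i (v_i.u), on a 2x2 example
// where the retained eigenbasis has a single eigenvector.  All names are
// hypothetical; the block is disabled so it does not affect compilation, and
// the include would sit at file scope if the sketch were extracted.
#if 0
#include <cstdio>

static void toy_inverse_cov_times_vec()
{
    // C = lambda * v v' + sigma^2 I with v = (1/sqrt(2), 1/sqrt(2)),
    // lambda = 3, sigma^2 = 1, so C = [[2.5, 1.5], [1.5, 2.5]] and
    // inverse(C) = [[0.625, -0.375], [-0.375, 0.625]].
    double v[2] = {0.7071067811865475, 0.7071067811865475};
    double lambda = 3.0, sigma2 = 1.0;
    double u[2] = {1.0, 0.0};
    double proj = v[0]*u[0] + v[1]*u[1];
    double coef = 1.0/(lambda + sigma2) - 1.0/sigma2;
    double Cinv_u[2];
    for (int k = 0; k < 2; k++)
        Cinv_u[k] = u[k]/sigma2 + coef * proj * v[k];
    // prints 0.625 -0.375, matching the direct inverse applied to u
    printf("%g %g\n", Cinv_u[0], Cinv_u[1]);
}
#endif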

real GaussianProcessRegressor::QFormInverse(real sigma2, Vec u) const
{
    int m=eigenvectors.length();
    real one_over_sigma2 = 1.0/sigma2;
    real qf = dot(u,u)*one_over_sigma2; // u'u/sigma^2 (squared norm, not the norm)
    for (int i=0;i<m;i++)
    {
        Vec v_i = eigenvectors(i);
        real proj = dot(v_i,u);
        qf += (1.0/(eigenvalues[i]+sigma2)-one_over_sigma2) * proj*proj;
    }
    return qf;
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :