GaussianProcessNLLVariable.cc
// -*- C++ -*-

// GaussianProcessNLLVariable.cc
//
// Copyright (C) 2006 Nicolas Chapados
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Nicolas Chapados

#define PL_LOG_MODULE_NAME "GaussianProcessNLLVariable"

// From PLearn
#include "GaussianProcessNLLVariable.h"
#include <plearn/io/pl_log.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/io/MatIO.h>

#ifdef USE_BLAS_SPECIALISATIONS
#include <plearn/math/plapack.h>
#endif

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    GaussianProcessNLLVariable,
    "Compute the Negative-Log-Marginal-Likelihood for Gaussian Process Regression",
    "This Variable computes the negative-log-marginal likelihood function\n"
    "associated with Gaussian Process Regression (see GaussianProcessRegressor).\n"
    "It is primarily used to carry out hyperparameter optimization by conjugate\n"
    "gradient descent.\n"
    "\n"
    "To compute both the fprop and bprop (gradient of marginal NLL w.r.t. each\n"
    "hyperparameter), it requires the specification of the Kernel object used,\n"
    "the VMatrix of inputs, the VMatrix of targets, and the variables that wrap\n"
    "the hyperparameter options within the Kernel object structure (presumably\n"
    "ObjectOptionVariable, or similar).  These variables must be scalar\n"
    "variables.  To get something like Automatic Relevance Determination, you\n"
    "should specify separately each Variable (in the PLearn sense) that\n"
    "corresponds to a given input hyperparameter.\n"
    );
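
// A minimal usage sketch, given as a hedged illustration only: 'kernel',
// 'train_inputs', 'train_targets' and 'sigma_var' are hypothetical objects
// assumed to have been created elsewhere (typically by
// GaussianProcessRegressor), and "isp_signal_sigma" stands for whatever
// kernel option the wrapped Var actually controls.
//
//     TVec<string> hyperparam_names(1, "isp_signal_sigma");
//     VarArray     hyperparam_vars;
//     hyperparam_vars.append(sigma_var);      // scalar Var wrapping the kernel option
//
//     Var nll = new GaussianProcessNLLVariable(
//         kernel, 1e-6 /* noise */, train_inputs, train_targets,
//         hyperparam_names, hyperparam_vars,
//         true /* allow_bprop */, false /* save_gram_matrix */, PPath());
//     nll->fprop();                 // NLL is now in nll->value[0]
//     nll->gradient[0] = 1;
//     nll->bprop();                 // accumulates dNLL/dtheta into each hyperparameter Var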

GaussianProcessNLLVariable::GaussianProcessNLLVariable()
    : m_save_gram_matrix(0),
      m_kernel(0),
      m_noise(0),
      m_allow_bprop(true)
{ }


GaussianProcessNLLVariable::GaussianProcessNLLVariable(
    Kernel* kernel, real noise, Mat inputs, Mat targets,
    const TVec<string>& hyperparam_names, const VarArray& hyperparam_vars,
    bool allow_bprop, bool save_gram_matrix, PPath expdir)
    : inherited(hyperparam_vars, 1, 1),
      m_save_gram_matrix(save_gram_matrix),
      m_expdir(expdir),
      m_kernel(kernel),
      m_noise(noise),
      m_inputs(inputs),
      m_targets(targets),
      m_hyperparam_names(hyperparam_names),
      m_hyperparam_vars(hyperparam_vars),
      m_allow_bprop(allow_bprop)
{
    build();
}


void GaussianProcessNLLVariable::recomputeSize(int& l, int& w) const
{
    // This is always the case for this variable
    l = 1;
    w = 1;
}


// ### Nothing to add here, simply calls build_
void GaussianProcessNLLVariable::build()
{
    inherited::build();
    build_();
}

void GaussianProcessNLLVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(m_kernel,          copies);
    deepCopyField(m_inputs,          copies);
    deepCopyField(m_targets,         copies);
    deepCopyField(m_hyperparam_names,copies);
    deepCopyField(m_hyperparam_vars, copies);
    deepCopyField(m_gram,            copies);
    deepCopyField(m_gram_derivative, copies);
    deepCopyField(m_cholesky_gram,   copies);
    deepCopyField(m_alpha_t,         copies);
    deepCopyField(m_alpha_buf,       copies);
    deepCopyField(m_inverse_gram,    copies);
    deepCopyField(m_cholesky_tmp,    copies);
    deepCopyField(m_rhs_tmp,         copies);
}

void GaussianProcessNLLVariable::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "save_gram_matrix", &GaussianProcessNLLVariable::m_save_gram_matrix,
        OptionBase::buildoption,
        "If true, the Gram matrix is saved before undergoing Cholesky\n"
        "decomposition; useful for debugging if the matrix is quasi-singular.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void GaussianProcessNLLVariable::build_()
{
    PLASSERT( m_kernel && m_inputs.isNotNull() && m_targets.isNotNull() );
}


//#####  alpha  ###############################################################

const Mat& GaussianProcessNLLVariable::alpha() const
{
    m_alpha_buf.resize(m_alpha_t.width(), m_alpha_t.length());
    transpose(m_alpha_t, m_alpha_buf);
    return m_alpha_buf;
}


//#####  fprop  ###############################################################

// ### computes value from varray values
void GaussianProcessNLLVariable::fprop()
{
    // logVarray(m_hyperparam_vars, "FProp current hyperparameters:", true);

    // Ensure that the current hyperparameter variable values are propagated
    // into kernel options
    m_hyperparam_vars.fprop();

    fbpropFragments(m_kernel, m_noise, m_inputs, m_targets, m_allow_bprop,
                    m_save_gram_matrix, m_expdir,
                    m_gram, m_cholesky_gram, m_alpha_t, m_inverse_gram,
                    m_cholesky_tmp, m_rhs_tmp);

    // Assuming y is a column vector...  For multivariate targets, we
    // separately dot each column of the targets with corresponding columns of
    // alpha, and add as many of the other two terms as there are variables
    //
    //     0.5 * y'*alpha + sum(log(diag(L))) + 0.5*n*log(2*pi)
    //
    // Don't forget that alpha_t is transposed
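    //
    // For reference (standard GP regression algebra): with K = Gram + noise*I,
    // L its Cholesky factor (K = L*L'), and alpha = K^-1 * y, each target
    // column contributes
    //
    //     0.5 * y'*K^-1*y + 0.5*log det(K) + 0.5*n*log(2*pi)
    //
    // and log det(K) = 2*sum(log(diag(L))), which is why the code below sums
    // log(diag(L)) once, without a factor of 0.5.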
    const int n = m_alpha_t.width();
    const int m = m_alpha_t.length();

    real logdet_log2pi = 0;
    for (int i=0 ; i<n ; ++i)
        logdet_log2pi += pl_log(m_cholesky_gram(i,i));
    logdet_log2pi += 0.5 * n * pl_log(2*M_PI);

    real nll = 0;
    for (int i=0 ; i<m ; ++i)
        nll += 0.5*dot(m_targets.column(i), m_alpha_t.row(i)) + logdet_log2pi;
    value[0] = nll;
}


//#####  bprop  ###############################################################

// ### computes varray gradients from gradient
void GaussianProcessNLLVariable::bprop()
{
    PLASSERT_MSG( m_allow_bprop,
                  "GaussianProcessNLLVariable must be constructed with the option "
                  "'allow_bprop'=true in order to call bprop" );
    PLASSERT( m_hyperparam_names.size() == m_hyperparam_vars.size() );
    PLASSERT( m_alpha_t.width() == m_inverse_gram.width() );
    PLASSERT( m_inverse_gram.width() == m_inverse_gram.length() );
    PLASSERT( m_kernel );

    // Loop over the hyperparameters in order to compute the derivative of the
    // gram matrix once for each hyperparameter.  Then loop over the target
    // variables to accumulate the gradient.  For each target, we must compute
    //
    //    trace((K^-1 - alpha*alpha') * dK/dtheta_j)
    //
    // Since both the first term inside the trace and the derivative of the
    // gram matrix are symmetric square matrices, the trace is efficiently
    // computed as the sum of the elementwise product of those matrices.
    //
    // Don't forget that m_alpha_t is transposed.
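    //
    // For reference, the derivation (standard matrix-calculus identities):
    // since NLL = 0.5*y'*K^-1*y + 0.5*log det(K) + const and alpha = K^-1*y,
    //
    //     d(log det K)/dtheta_j  =  trace(K^-1 * dK/dtheta_j)
    //     d(y'*K^-1*y)/dtheta_j  = -alpha' * (dK/dtheta_j) * alpha
    //                            = -trace(alpha*alpha' * dK/dtheta_j)
    //
    // hence dNLL/dtheta_j = 0.5*trace((K^-1 - alpha*alpha') * dK/dtheta_j),
    // which is what the loops below accumulate (the factor 0.5 is applied
    // when dividing cur_trace by 2).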
    for (int j=0, m=m_hyperparam_names.size() ; j<m ; ++j) {
        real dnll_dj = 0;
        m_kernel->computeGramMatrixDerivative(m_gram_derivative,
                                              m_hyperparam_names[j]);
        for (int i=0, n=m_alpha_t.length() ; i<n ; ++i) {
            real* curalpha = m_alpha_t[i];
            real  cur_trace = 0.0;

            // Sum over all rows and columns of matrix
            real* curalpha_row = curalpha;
            for (int row=0, nrows=m_inverse_gram.length()
                     ; row<nrows ; ++row, ++curalpha_row)
            {
                real* p_inverse_gram     = m_inverse_gram[row];
                real* p_gram_derivative  = m_gram_derivative[row];
                real  curalpha_row_value = *curalpha_row;
                real* curalpha_col       = curalpha;
                real  row_trace          = 0.0;

                for (int col=0 ; col <= row ; ++col, ++curalpha_col)
                {
                    if (col == row)
                        row_trace *= 2.;

                    row_trace +=
                        (*p_inverse_gram++ - curalpha_row_value * *curalpha_col)
                        * *p_gram_derivative++;

                    // cur_trace +=
                    //     (m_inverse_gram(row,col) - curalpha(row,0)*curalpha(col,0))
                    //     * m_gram_derivative(row,col);
                }
                cur_trace += row_trace;
            }

            dnll_dj += cur_trace / 2.0;
        }
        m_hyperparam_vars[j]->gradient[0] += dnll_dj * gradient[0];
    }
}


//#####  fbpropFragments (NO LAPACK)  #########################################

#ifndef USE_BLAS_SPECIALISATIONS
void GaussianProcessNLLVariable::fbpropFragments(
    Kernel* kernel, real noise, const Mat& inputs, const Mat& targets,
    bool compute_inverse, bool save_gram_matrix, const PPath& expdir,
    Mat& gram, Mat& L, Mat& alpha_t, Mat& inv,
    Vec& tmp_chol, Mat& tmp_rhs)
{
    PLASSERT( kernel );
    PLASSERT( inputs.length() == targets.length() );
    const int trainlength = inputs.length();
    const int targetsize  = targets.width();

    // The RHS matrix (when solving the linear system Gram*Params=RHS) is made
    // up of two parts: the regression targets themselves, and the identity
    // matrix if we requested them (for confidence intervals).  After solving
    // the linear system, set the gram-inverse appropriately.
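    //
    // Equivalently, a single Cholesky factorization of the Gram matrix solves
    //
    //     Gram * [ alpha | Gram^-1 ] = [ targets | I ]
    //
    // so the regression coefficients and (when requested) the Gram inverse
    // come out of the same triangular solves.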
    int rhs_width = targetsize + (compute_inverse? trainlength : 0);
    tmp_rhs.resize(trainlength, rhs_width);
    tmp_rhs.subMatColumns(0, targetsize) << targets;
    if (compute_inverse) {
        Mat rhs_identity = tmp_rhs.subMatColumns(targetsize, trainlength);
        identityMatrix(rhs_identity);
    }

    // Compute Gram Matrix and add weight decay to diagonal
    kernel->setDataForKernelMatrix(inputs);
    gram.resize(trainlength, trainlength);
    kernel->computeGramMatrix(gram);
    addToDiagonal(gram, noise);

    // The PLearn code relies on the matrix actually being symmetric in memory
    // (assumption which LAPACK does not make). Symmetrize the matrix.
    PLCHECK(kernel->is_symmetric);
    PLASSERT_MSG(gram.isSymmetric(false), "Gram matrix is not symmetric");
    fillItSymmetric(gram);

    // Save the Gram matrix if requested
    if (save_gram_matrix) {
        static int counter = 1;
        string filename = expdir / ("gram_matrix_" +
                                    tostring(counter++) + ".pmat");
        savePMat(filename, gram);
    }

    // Dump a fragment of the Gram Matrix to the debug log
    DBG_MODULE_LOG << "Gram fragment: "
                   << gram(0,0) << ' '
                   << gram(1,0) << ' '
                   << gram(1,1) << endl;

    // Compute Cholesky decomposition and solve the linear system
    alpha_t.resize(trainlength, rhs_width);
    L.resize(trainlength, trainlength);
    tmp_chol.resize(trainlength);
    solveLinearSystemByCholesky(gram, tmp_rhs, alpha_t, &L, &tmp_chol);

    // Must return transpose here since the code has been modified to work with
    // a transposed alpha, to better interface with lapack (much faster in the
    // latter case to avoid superfluous transposes).
    if (compute_inverse) {
        inv     = alpha_t.subMatColumns(targetsize, trainlength);
        alpha_t = transpose(alpha_t.subMatColumns(0, targetsize));
    }
    else
        alpha_t = transpose(alpha_t);
}
#endif

//#####  fbpropFragments (LAPACK)  ############################################

// #if 0
#ifdef USE_BLAS_SPECIALISATIONS
void GaussianProcessNLLVariable::fbpropFragments(
    Kernel* kernel, real noise, const Mat& inputs, const Mat& targets,
    bool compute_inverse, bool save_gram_matrix, const PPath& expdir,
    Mat& gram, Mat& L, Mat& alpha_t, Mat& inv,
    Vec& tmp_chol, Mat& tmp_rhs)
{
    PLASSERT( kernel );
    PLASSERT( inputs.length() == targets.length() );
    const int trainlength = inputs.length();
    const int targetsize  = targets.width();

    // The RHS matrix (when solving the linear system Gram*Params=RHS) is made
    // up of two parts: the regression targets themselves, and the identity
    // matrix if we requested them (for confidence intervals).  After solving
    // the linear system, set the gram-inverse appropriately.  To interface
    // nicely with LAPACK, we store this in a transposed format.
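    //
    // (The transposed storage is presumably what lets the row-major PLearn
    // Mat be handed to column-major LAPACK as-is: the memory of RHS' laid out
    // row by row is exactly RHS laid out column by column, so no extra
    // transpose/copy of this potentially large matrix is needed.)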
    int rhs_width = targetsize + (compute_inverse? trainlength : 0);
    tmp_rhs.resize(rhs_width, trainlength);
    Mat targets_submat = tmp_rhs.subMatRows(0, targetsize);
    transpose(targets, targets_submat);
    if (compute_inverse) {
        Mat rhs_identity = tmp_rhs.subMatRows(targetsize, trainlength);
        identityMatrix(rhs_identity);
    }

    // Compute Gram Matrix and add weight decay to diagonal
    kernel->setDataForKernelMatrix(inputs);
    gram.resize(trainlength, trainlength);
    kernel->computeGramMatrix(gram);
    addToDiagonal(gram, noise);

    // Save the Gram matrix if requested
    if (save_gram_matrix) {
        static int counter = 1;
        string filename = expdir / ("gram_matrix_" +
                                    tostring(counter++) + ".pmat");
        savePMat(filename, gram);
    }

    // Dump a fragment of the Gram Matrix to the debug log
    DBG_MODULE_LOG << "Gram fragment: "
                   << gram(0,0) << ' '
                   << gram(1,0) << ' '
                   << gram(1,1) << endl;

    // Compute Cholesky decomposition and solve the linear system.  LAPACK
    // solves in-place, but luckily we need neither the Gram nor the RHS
    // matrix after solving.  Note that for now we don't bother to create an
    // appropriately transposed RHS (will come later).
    lapackCholeskyDecompositionInPlace(gram);
    lapackCholeskySolveInPlace(gram, tmp_rhs, true /* column-major */);
    alpha_t = tmp_rhs;                         // LAPACK solves in-place
    L       = gram;                            // LAPACK solves in-place

    if (compute_inverse) {
        inv     = alpha_t.subMatRows(targetsize, trainlength);
        alpha_t = alpha_t.subMatRows(0, targetsize);
    }
}
#endif
// #endif

//#####  logVarray  ###########################################################

void GaussianProcessNLLVariable::logVarray(const VarArray& varr,
                                           const string& title, bool debug)
{
    string entry = title + '\n';
    for (int i=0, n=varr.size() ; i<n ; ++i) {
        entry += right(varr[i]->getName(), 35) + ": " + tostring(varr[i]->value[0]);
        if (i < n-1)
            entry += '\n';
    }
    if (debug) {
        DBG_MODULE_LOG << entry << endl;
    }
    else {
        MODULE_LOG << entry << endl;
    }
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :