// -*- C++ -*-

// SquaredExponentialARDKernel.cc
//
// Copyright (C) 2006-2007 Nicolas Chapados
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Nicolas Chapados

#include "SquaredExponentialARDKernel.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    SquaredExponentialARDKernel,
    "Squared-Exponential kernel that can be used for Automatic Relevance Determination",
    "This is a variant of the GaussianKernel (a.k.a. Radial Basis Function)\n"
    "that provides a different length-scale parameter for each input variable.\n"
    "When used in conjunction with GaussianProcessRegressor, this kernel may be\n"
    "used for Automatic Relevance Determination (ARD), a procedure wherein the\n"
    "significance of each input variable for the prediction task is found\n"
    "automatically through numerical optimization.\n"
    "\n"
    "Similar to C.E. Rasmussen's GPML code (see http://www.gaussianprocess.org),\n"
    "this kernel function is specified as:\n"
    "\n"
    "  k(x,y) = sf * exp(- 0.5 * (sum_i (x_i - y_i)^2 / w_i)) * k_kron(x,y)\n"
    "\n"
    "where sf is softplus(isp_signal_sigma), w_i is softplus(isp_global_sigma +\n"
    "isp_input_sigma[i]), and k_kron(x,y) is the result of the\n"
    "KroneckerBaseKernel evaluation, or 1.0 if there are no Kronecker terms.\n"
    "Note that since the Kronecker terms are incorporated multiplicatively, the\n"
    "very presence of the term associated to this kernel can be gated by the\n"
    "value of some input variable(s) (that are incorporated within one or more\n"
    "Kronecker terms).\n"
    "\n"
    "Note that contrary to previous versions that incorporated IID noise and\n"
    "Kronecker terms ADDITIVELY, this version does not add any noise at all (and\n"
    "as explained above incorporates the Kronecker terms multiplicatively).  For\n"
    "best results, especially with moderately noisy data, IT IS IMPERATIVE to\n"
    "use this kernel within a SummationKernel in conjunction with an\n"
    "IIDNoiseKernel, as follows (e.g. within a GaussianProcessRegressor):\n"
    "\n"
    "    kernel = SummationKernel(terms = [ SquaredExponentialARDKernel(),\n"
    "                                       IIDNoiseKernel() ] )\n"
    "\n"
    "Note that to make its operations more robust when used with unconstrained\n"
    "optimization of hyperparameters, all hyperparameters of this kernel are\n"
    "specified in the inverse softplus domain.  See IIDNoiseKernel for more\n"
    "explanations.\n"
    );
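
// For reference, a direct (unoptimized) transcription of the formula in the
// help text above, ignoring the Kronecker gating term; this is a hypothetical
// illustrative sketch (sqexp_ard is not part of the class interface):
//
//     real sqexp_ard(const Vec& x, const Vec& y,
//                    real isp_signal_sigma, real isp_global_sigma,
//                    const Vec& isp_input_sigma)
//     {
//         real expval = 0.0;
//         for (int i = 0, n = x.size() ; i < n ; ++i) {
//             real diff = x[i] - y[i];
//             expval += diff * diff
//                 / softplus(isp_global_sigma + isp_input_sigma[i]);
//         }
//         return softplus(isp_signal_sigma) * exp(-0.5 * expval);
//     }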


SquaredExponentialARDKernel::SquaredExponentialARDKernel()
{ }


//#####  declareOptions  ######################################################

void SquaredExponentialARDKernel::declareOptions(OptionList& ol)
{
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}


//#####  build  ###############################################################

void SquaredExponentialARDKernel::build()
{
    // ### Nothing to add here, simply calls build_
    inherited::build();
    build_();
}


//#####  build_  ##############################################################

void SquaredExponentialARDKernel::build_()
{
    // Ensure that we multiply in Kronecker terms
    inherited::m_default_value = 1.0;
}


//#####  evaluate  ############################################################

real SquaredExponentialARDKernel::evaluate(const Vec& x1, const Vec& x2) const
{
    PLASSERT( x1.size() == x2.size() );
    PLASSERT( !m_isp_input_sigma.size() || x1.size() == m_isp_input_sigma.size() );

    real gating_term = inherited::evaluate(x1,x2);
    if (fast_is_equal(gating_term, 0.0))
        return 0.0;

    if (x1.size() == 0)
        return softplus(m_isp_signal_sigma) * gating_term;

    const real* px1 = x1.data();
    const real* px2 = x2.data();
    real sf         = softplus(m_isp_signal_sigma);
    real expval     = 0.0;

    if (m_isp_input_sigma.size() > 0) {
        const real* pinpsig = m_isp_input_sigma.data();
        for (int i=0, n=x1.size() ; i<n ; ++i) {
            real diff   = *px1++ - *px2++;
            real sqdiff = diff * diff;
            expval     += sqdiff / softplus(m_isp_global_sigma + *pinpsig++);
        }
    }
    else {
        real global_sigma = softplus(m_isp_global_sigma);
        for (int i=0, n=x1.size() ; i<n ; ++i) {
            real diff   = *px1++ - *px2++;
            real sqdiff = diff * diff;
            expval     += sqdiff / global_sigma;
        }
    }

    // Gate by Kronecker term
    return sf * exp(-0.5 * expval) * gating_term;
}


//#####  computeGramMatrix  ###################################################

void SquaredExponentialARDKernel::computeGramMatrix(Mat K) const
{
    PLASSERT( !m_isp_input_sigma.size() || dataInputsize() == m_isp_input_sigma.size() );
    PLASSERT( K.size() == 0 || m_data_cache.size() > 0 );  // Ensure data cached OK

    // Compute Kronecker gram matrix
    inherited::computeGramMatrix(K);

    // Precompute some terms. Make sure that the input sigmas don't get too
    // small
    real sf    = softplus(m_isp_signal_sigma);
    m_input_sigma.resize(dataInputsize());
    softplusFloor(m_isp_global_sigma, 1e-6);
    m_input_sigma.fill(m_isp_global_sigma);  // Still in ISP domain
    for (int i=0, n=m_input_sigma.size() ; i<n ; ++i) {
        if (m_isp_input_sigma.size() > 0) {
            softplusFloor(m_isp_input_sigma[i], 1e-6);
            m_input_sigma[i] += m_isp_input_sigma[i];
        }
        m_input_sigma[i] = softplus(m_input_sigma[i]);
    }
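    // At this point, m_input_sigma[i] == softplus(isp_global_sigma +
    // isp_input_sigma[i]), i.e. the same per-dimension weight w_i used by
    // evaluate(), now in the direct (non-ISP) domain.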

    // Compute Gram Matrix
    int  l = data->length();
    int  m = K.mod();
    int  n = dataInputsize();
    int  cache_mod = m_data_cache.mod();

    real *data_start = &m_data_cache(0,0);
    real *Ki = K[0];                         // Start of current row
    real *Kij;                               // Current element along row
    real *input_sigma_data = m_input_sigma.data();
    real *xi = data_start;

    for (int i=0 ; i<l ; ++i, xi += cache_mod, Ki+=m)
    {
        Kij = Ki;
        real *xj = data_start;

        for (int j=0; j<=i; ++j, xj += cache_mod) {
            // Kernel evaluation per se
            real *x1 = xi;
            real *x2 = xj;
            real *p_inpsigma = input_sigma_data;
            real sum_wt = 0.0;
            int  k = n;

            // Use Duff's device to unroll the following loop:
            //     while (k--) {
            //         real diff = *x1++ - *x2++;
            //         sum_wt += (diff * diff) / *p_inpsigma++;
            //     }
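            // Caveat: this unrolled form assumes n >= 1.  With n == 0, the
            // 'case 0' branch would still execute the do-while body once and
            // read past the buffers (unlike the reference loop above).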
            real diff;
            switch (k % 8) {
            case 0: do { diff = *x1++ - *x2++; sum_wt += (diff*diff) / *p_inpsigma++;
            case 7:      diff = *x1++ - *x2++; sum_wt += (diff*diff) / *p_inpsigma++;
            case 6:      diff = *x1++ - *x2++; sum_wt += (diff*diff) / *p_inpsigma++;
            case 5:      diff = *x1++ - *x2++; sum_wt += (diff*diff) / *p_inpsigma++;
            case 4:      diff = *x1++ - *x2++; sum_wt += (diff*diff) / *p_inpsigma++;
            case 3:      diff = *x1++ - *x2++; sum_wt += (diff*diff) / *p_inpsigma++;
            case 2:      diff = *x1++ - *x2++; sum_wt += (diff*diff) / *p_inpsigma++;
            case 1:      diff = *x1++ - *x2++; sum_wt += (diff*diff) / *p_inpsigma++;
                       } while((k -= 8) > 0);
            }

            // Multiplicatively update kernel matrix (already pre-filled with
            // Kronecker terms, or 1.0 if no Kronecker terms, as per build_).
            real Kij_cur = *Kij * sf * exp(-0.5 * sum_wt);
            *Kij++ = Kij_cur;
        }
    }
    if (cache_gram_matrix) {
        gram_matrix.resize(l,l);
        gram_matrix << K;
        gram_matrix_is_cached = true;
    }
}


//#####  computeGramMatrixDerivative  #########################################

void SquaredExponentialARDKernel::computeGramMatrixDerivative(
    Mat& KD, const string& kernel_param, real epsilon) const
{
    static const string ISS("isp_signal_sigma");
    static const string IGS("isp_global_sigma");
    static const string IIS("isp_input_sigma[");
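
    // Dispatch on the hyperparameter name: for instance,
    // kernel_param == "isp_input_sigma[3]" extracts the index 3 and calls
    // computeGramMatrixDerivIspInputSigma(KD, 3) below.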
    if (kernel_param == ISS) {
        computeGramMatrixDerivIspSignalSigma(KD);

        // computeGramMatrixDerivNV<
        //     SquaredExponentialARDKernel,
        //     &SquaredExponentialARDKernel::derivIspSignalSigma>(KD, this, -1);
    }
    else if (kernel_param == IGS) {
        computeGramMatrixDerivNV<
            SquaredExponentialARDKernel,
            &SquaredExponentialARDKernel::derivIspGlobalSigma>(KD, this, -1);
    }
    else if (string_begins_with(kernel_param, IIS) &&
             kernel_param[kernel_param.size()-1] == ']')
    {
        int arg = tolong(kernel_param.substr(
                             IIS.size(), kernel_param.size() - IIS.size() - 1));
        PLASSERT( arg < m_isp_input_sigma.size() );

        computeGramMatrixDerivIspInputSigma(KD, arg);
    }
    else
        inherited::computeGramMatrixDerivative(KD, kernel_param, epsilon);
}


//#####  evaluate_all_i_x  ####################################################

void SquaredExponentialARDKernel::evaluate_all_i_x(const Vec& x, const Vec& k_xi_x,
                                                   real squared_norm_of_x, int istart) const
{
    evaluateAllIXNV<SquaredExponentialARDKernel>(x, k_xi_x, istart);
}


//#####  derivIspSignalSigma  #################################################

real SquaredExponentialARDKernel::derivIspSignalSigma(int i, int j, int arg, real K) const
{
    // (No longer used; see computeGramMatrixDerivIspSignalSigma below)
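    // Derivation (since softplus'(x) = sigmoid(x)): with K = softplus(iss)*E,
    // where E collects the exponential and gating factors, we get
    //     dK/d(iss) = sigmoid(iss) * E = K * sigmoid(iss) / softplus(iss).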
    return K*sigmoid(m_isp_signal_sigma)/softplus(m_isp_signal_sigma);
}


//#####  derivIspGlobalSigma  #################################################

real SquaredExponentialARDKernel::derivIspGlobalSigma(int i, int j, int arg, real K) const
{
    if (fast_is_equal(K,0.))
        return 0.;

    // The norm term inside the exponential may be accessed as log(K/sf)
    real inner = pl_log(K / softplus(m_isp_signal_sigma));
    return - K * inner * sigmoid(m_isp_global_sigma) / softplus(m_isp_global_sigma);

    // Note: in the above expression for 'inner' there is the implicit
    // assumption that the input_sigma[i] are zero, which allows the
    // sigmoid/softplus term to be factored out of the norm summation.
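    //
    // Derivation: with w = softplus(igs) and K = sf * exp(-0.5 * D / w),
    // where D = sum_i (x_i - y_i)^2, we have inner = log(K/sf) = -0.5 * D / w,
    // so dK/d(igs) = K * (0.5 * D / w^2) * sigmoid(igs)
    //              = -K * inner * sigmoid(igs) / softplus(igs).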
}


//#####  computeGramMatrixDerivIspSignalSigma  ################################

void SquaredExponentialARDKernel::computeGramMatrixDerivIspSignalSigma(Mat& KD) const
{
    int l = data->length();
    KD.resize(l,l);
    PLASSERT_MSG(
        gram_matrix.width() == l && gram_matrix.length() == l,
        "To compute the derivative with respect to 'isp_signal_sigma', the\n"
        "Gram matrix must be precomputed and cached in SquaredExponentialARDKernel.");

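    // Since sf = softplus(isp_signal_sigma) enters the kernel purely
    // multiplicatively, the derivative is the cached Gram matrix rescaled
    // elementwise by sigmoid(isp_signal_sigma) / sf.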
    KD << gram_matrix;
    KD *= sigmoid(m_isp_signal_sigma)/softplus(m_isp_signal_sigma);
}


//#####  computeGramMatrixDerivIspInputSigma  #################################

void SquaredExponentialARDKernel::computeGramMatrixDerivIspInputSigma(Mat& KD,
                                                                      int arg) const
{
    // Precompute some terms
    real input_sigma_arg = m_input_sigma[arg];
    real input_sigma_sq  = input_sigma_arg * input_sigma_arg;
    real input_sigmoid   = sigmoid(m_isp_global_sigma + m_isp_input_sigma[arg]);
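
    // Derivation: with w = softplus(igs + iis[arg]) (== m_input_sigma[arg],
    // precomputed by computeGramMatrix), the kernel contains the factor
    // exp(-0.5 * (x_arg - y_arg)^2 / w), so
    //     dK/d(iis[arg]) = K * 0.5 * (x_arg - y_arg)^2 / w^2
    //                        * sigmoid(igs + iis[arg]),
    // which is exactly the KD_cur expression in the loop below.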

    // Compute Gram Matrix derivative w.r.t. isp_input_sigma[arg]
    int  l = data->length();
    PLASSERT_MSG(
        gram_matrix.width() == l && gram_matrix.length() == l,
        "To compute the derivative with respect to 'isp_input_sigma[i]', the\n"
        "Gram matrix must be precomputed and cached in SquaredExponentialARDKernel.");

    // Variables that walk over the data matrix
    int  cache_mod = m_data_cache.mod();
    real *data_start = &m_data_cache(0,0);
    real *xi = data_start+arg;               // Iterator on data rows

    // Variables that walk over the gram cache
    int   gram_cache_mod = gram_matrix.mod();
    real *gram_cache_row = gram_matrix.data();
    real *gram_cache_cur;

    // Variables that walk over the kernel derivative matrix (KD)
    KD.resize(l,l);
    real* KDi = KD.data();                   // Start of row i
    real* KDij;                              // Current element on row i
    int   KD_mod = KD.mod();

    // Iterate on rows of derivative matrix
    for (int i=0 ; i<l ; ++i, xi += cache_mod, KDi += KD_mod,
             gram_cache_row += gram_cache_mod)
    {
        KDij = KDi;
        real *xj  = data_start+arg;           // Inner iterator on data rows
        gram_cache_cur = gram_cache_row;

        // Iterate on columns of derivative matrix
        for (int j=0 ; j<=i ; ++j, xj += cache_mod, ++gram_cache_cur)
        {
            real diff    = *xi - *xj;
            real sq_diff = diff * diff;
            real KD_cur  = 0.5 * *gram_cache_cur *
                           input_sigmoid * sq_diff / input_sigma_sq;

            // Set into derivative matrix
            *KDij++ = KD_cur;
        }
    }
}


//#####  makeDeepCopyFromShallowCopy  #########################################

void SquaredExponentialARDKernel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :