// -*- C++ -*-

// LinearARDKernel.cc
//
// Copyright (C) 2007-2009 Nicolas Chapados
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Nicolas Chapados
#include "LinearARDKernel.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    LinearARDKernel,
    "Linear kernel that can be used for Automatic Relevance Determination",
    "This is a simple linear (dot-product) kernel that provides a different\n"
    "length-scale parameter for each input variable.  When used in conjunction\n"
    "with GaussianProcessRegressor it yields a Bayesian linear regression model\n"
    "with a non-isotropic prior.  (It is not a particularly efficient way of\n"
    "performing linear regression, but can be useful as a benchmark against\n"
    "other kernels).\n"
    "\n"
    "This kernel function is specified as:\n"
    "\n"
    "  k(x,y) = sf * (sum_i x_i * y_i / w_i) * k_kron(x,y)\n"
    "\n"
    "where sf is softplus(isp_signal_sigma), w_i is softplus(isp_global_sigma +\n"
    "isp_input_sigma[i]), and k_kron(x,y) is the result of the\n"
    "KroneckerBaseKernel evaluation, or 1.0 if there are no Kronecker terms.\n"
    "Note that since the Kronecker terms are incorporated multiplicatively, the\n"
    "very presence of the term associated to this kernel can be gated by the\n"
    "value of some input variable(s) (that are incorporated within one or more\n"
    "Kronecker terms).\n"
    "\n"
    "For best results, especially with moderately noisy data, IT IS IMPERATIVE\n"
    "to use this kernel within a SummationKernel in conjunction with an\n"
    "IIDNoiseKernel, as follows (e.g. within a GaussianProcessRegressor):\n"
    "\n"
    "    kernel = SummationKernel(terms = [ LinearARDKernel(),\n"
    "                                       IIDNoiseKernel() ] )\n"
    "\n"
    "Note that to make its operations more robust when used with unconstrained\n"
    "optimization of hyperparameters, all hyperparameters of this kernel are\n"
    "specified in the inverse softplus domain.  See IIDNoiseKernel for more\n"
    "explanations.\n"
    );
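
// Note on the inverse-softplus parameterization: softplus(t) = log(1 + exp(t)),
// so an unconstrained hyperparameter t always maps to a positive value.  For
// instance, isp_signal_sigma = 0 yields sf = softplus(0) = log(2) ~= 0.693.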

LinearARDKernel::LinearARDKernel()
{ }


//#####  declareOptions  ######################################################

void LinearARDKernel::declareOptions(OptionList& ol)
{
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}


//#####  build  ###############################################################

void LinearARDKernel::build()
{
    // ### Nothing to add here, simply calls build_
    inherited::build();
    build_();
}


//#####  build_  ##############################################################

void LinearARDKernel::build_()
{
    // Ensure that we multiply in Kronecker terms
    inherited::m_default_value = 1.0;
}


//#####  evaluate  ############################################################

real LinearARDKernel::evaluate(const Vec& x1, const Vec& x2) const
{
    PLASSERT( x1.size() == x2.size() );
    PLASSERT( !m_isp_input_sigma.size() || x1.size() == m_isp_input_sigma.size() );

    real gating_term = inherited::evaluate(x1,x2);
    if (fast_is_equal(gating_term, 0.0) || x1.size() == 0)
        return 0.0;

    real the_dot    = 0.0;
    if (m_isp_input_sigma.size() > 0) {
        const real* px1 = x1.data();
        const real* px2 = x2.data();
        const real* pinpsig = m_isp_input_sigma.data();
        for (int i=0, n=x1.size() ; i<n ; ++i) {
            the_dot += (*px1++ * *px2++) / softplus(m_isp_global_sigma + *pinpsig++);
        }
    }
    else {
        real global_sigma = softplus(m_isp_global_sigma);
        the_dot = dot(x1, x2) / global_sigma;
    }

    // Gate by Kronecker term
    return softplus(m_isp_signal_sigma) * the_dot * gating_term;
}
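
// Worked example (a sketch): with no Kronecker terms (gating_term == 1.0) and
// no per-input sigmas, x1 = (1, 2) and x2 = (3, 4) give
// dot(x1,x2) = 1*3 + 2*4 = 11, so evaluate() returns
//     softplus(isp_signal_sigma) * 11 / softplus(isp_global_sigma).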

//#####  computeGramMatrix  ###################################################

void LinearARDKernel::computeGramMatrix(Mat K) const
{
    PLASSERT( !m_isp_input_sigma.size() || dataInputsize() == m_isp_input_sigma.size() );
    PLASSERT( K.size() == 0 || m_data_cache.size() > 0 );  // Ensure data cached OK

    // Compute Kronecker gram matrix
    inherited::computeGramMatrix(K);

    // Precompute some terms. Make sure that the input sigmas don't get too
    // small
    real sf = softplus(m_isp_signal_sigma);
    m_input_sigma.resize(dataInputsize());
    softplusFloor(m_isp_global_sigma, 1e-6);
    m_input_sigma.fill(m_isp_global_sigma);  // Still in ISP domain
    for (int i=0, n=m_input_sigma.size() ; i<n ; ++i) {
        if (m_isp_input_sigma.size() > 0) {
            softplusFloor(m_isp_input_sigma[i], 1e-6);
            m_input_sigma[i] += m_isp_input_sigma[i];
        }
        m_input_sigma[i] = softplus(m_input_sigma[i]);
    }

    // Compute Gram Matrix
    int  l = data->length();
    int  m = K.mod();
    int  n = dataInputsize();
    int  cache_mod = m_data_cache.mod();

    real *data_start = &m_data_cache(0,0);
    real *Ki = K[0];                         // Start of current row
    real *Kij;                               // Current element along row
    real *input_sigma_data = m_input_sigma.data();
    real *xi = data_start;

    for (int i=0 ; i<l ; ++i, xi += cache_mod, Ki += m)
    {
        Kij = Ki;
        real *xj = data_start;

        for (int j=0; j<=i; ++j, xj += cache_mod) {
            // Kernel evaluation per se
            real *x1 = xi;
            real *x2 = xj;
            real *p_inpsigma = input_sigma_data;
            real the_dot = 0.0;
            int  k = n;

            // Use Duff's device to unroll the following loop:
            //     while (k--) {
            //         the_dot += (*x1++ * *x2++) / *p_inpsigma++;
            //     }
            // Guard against k == 0, for which the unrolled device would
            // otherwise run a full batch of eight iterations.
            if (k > 0) {
                switch (k % 8) {
                case 0: do { the_dot += (*x1++ * *x2++) / *p_inpsigma++;
                case 7:      the_dot += (*x1++ * *x2++) / *p_inpsigma++;
                case 6:      the_dot += (*x1++ * *x2++) / *p_inpsigma++;
                case 5:      the_dot += (*x1++ * *x2++) / *p_inpsigma++;
                case 4:      the_dot += (*x1++ * *x2++) / *p_inpsigma++;
                case 3:      the_dot += (*x1++ * *x2++) / *p_inpsigma++;
                case 2:      the_dot += (*x1++ * *x2++) / *p_inpsigma++;
                case 1:      the_dot += (*x1++ * *x2++) / *p_inpsigma++;
                           } while((k -= 8) > 0);
                }
            }

            // Multiplicatively update kernel matrix (already pre-filled with
            // Kronecker terms, or 1.0 if no Kronecker terms, as per build_).
            *Kij++ *= sf * the_dot;
        }
    }
    if (cache_gram_matrix) {
        gram_matrix.resize(l,l);
        gram_matrix << K;
        gram_matrix_is_cached = true;
    }
}
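
// Usage sketch (with a hypothetical 'trainset' VMat): the data must be cached
// before the Gram matrix can be filled, e.g.
//     kernel->setDataForKernelMatrix(trainset);
//     Mat K(trainset->length(), trainset->length());
//     kernel->computeGramMatrix(K);
// Note that the loops above fill only the lower triangle (j <= i) of K.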

//#####  computeGramMatrixDerivative  #########################################

void LinearARDKernel::computeGramMatrixDerivative(
    Mat& KD, const string& kernel_param, real epsilon) const
{
    static const string ISS("isp_signal_sigma");
    static const string IGS("isp_global_sigma");
    static const string IIS("isp_input_sigma[");

    if (kernel_param == ISS) {
        computeGramMatrixDerivIspSignalSigma(KD);

        // computeGramMatrixDerivNV<
        //     LinearARDKernel,
        //     &LinearARDKernel::derivIspSignalSigma>(KD, this, -1);
    }
    else if (kernel_param == IGS) {
        computeGramMatrixDerivNV<
            LinearARDKernel,
            &LinearARDKernel::derivIspGlobalSigma>(KD, this, -1);
    }
    else if (string_begins_with(kernel_param, IIS) &&
             kernel_param[kernel_param.size()-1] == ']')
    {
        int arg = tolong(kernel_param.substr(
                             IIS.size(), kernel_param.size() - IIS.size() - 1));
        PLASSERT( arg < m_isp_input_sigma.size() );

        computeGramMatrixDerivIspInputSigma(KD, arg);
    }
    else
        // Any other hyperparameter falls through to the inherited
        // implementation (presumably a numerical approximation, given the
        // 'epsilon' argument).
        inherited::computeGramMatrixDerivative(KD, kernel_param, epsilon);
}

//#####  evaluate_all_i_x  ####################################################

void LinearARDKernel::evaluate_all_i_x(const Vec& x, const Vec& k_xi_x,
                                       real squared_norm_of_x, int istart) const
{
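    // squared_norm_of_x is not needed by this kernel; delegate to the
    // memory-cached evaluation helper.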
    evaluateAllIXNV<LinearARDKernel>(x, k_xi_x, istart);
}


//#####  derivIspSignalSigma  #################################################

real LinearARDKernel::derivIspSignalSigma(int i, int j, int arg, real K) const
{
    // (No longer used; see computeGramMatrixDerivIspSignalSigma below)
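    // Since k = softplus(isp_signal_sigma) * (a factor independent of
    // isp_signal_sigma), and d softplus(t)/dt = sigmoid(t), we have
    //     dk/d isp_signal_sigma = k * sigmoid / softplus.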
    return K*sigmoid(m_isp_signal_sigma)/softplus(m_isp_signal_sigma);
}


//#####  derivIspGlobalSigma  #################################################

real LinearARDKernel::derivIspGlobalSigma(int i, int j, int arg, real K) const
{
    if (fast_is_equal(K,0.))
        return 0.;

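    // Derivation sketch (assuming a single shared w = softplus(isp_global_sigma),
    // i.e. no per-input sigmas): k = sf * dot(x,y) / w, so
    //     dk/d isp_global_sigma = -k * sigmoid(isp_global_sigma) / w.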
    return - K * sigmoid(m_isp_global_sigma) / softplus(m_isp_global_sigma);
}


//#####  computeGramMatrixDerivIspSignalSigma  ################################

void LinearARDKernel::computeGramMatrixDerivIspSignalSigma(Mat& KD) const
{
    int l = data->length();
    KD.resize(l,l);
    PLASSERT_MSG(
        gram_matrix.width() == l && gram_matrix.length() == l,
        "To compute the derivative with respect to 'isp_signal_sigma', the\n"
        "Gram matrix must be precomputed and cached in LinearARDKernel.");

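    // The cached Gram matrix is K = softplus(isp_signal_sigma) * R, with R
    // independent of isp_signal_sigma, so
    //     dK/d isp_signal_sigma = sigmoid(isp_signal_sigma) * R
    //                           = K * sigmoid / softplus.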
    KD << gram_matrix;
    KD *= sigmoid(m_isp_signal_sigma)/softplus(m_isp_signal_sigma);
}


//#####  computeGramMatrixDerivIspInputSigma  #################################

void LinearARDKernel::computeGramMatrixDerivIspInputSigma(Mat& KD, int arg) const
{
    // Precompute some terms
    real signal_sigma    = softplus(m_isp_signal_sigma);
    real input_sigma_arg = m_input_sigma[arg];
    real input_sigma_sq  = input_sigma_arg * input_sigma_arg;
    real input_sigmoid   = sigmoid(m_isp_global_sigma + m_isp_input_sigma[arg]);

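    // Derivation sketch: with w_arg = softplus(isp_global_sigma +
    // isp_input_sigma[arg]) and k(x,y) = sf * sum_i x_i*y_i / w_i,
    //     dk/d isp_input_sigma[arg]
    //         = -sf * x_arg*y_arg * sigmoid(isp_global_sigma +
    //            isp_input_sigma[arg]) / w_arg^2,
    // since d softplus(t)/dt = sigmoid(t).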
    // Compute Gram Matrix derivative w.r.t. isp_input_sigma[arg]
    int  l = data->length();
    PLASSERT_MSG(
        gram_matrix.width() == l && gram_matrix.length() == l,
        "To compute the derivative with respect to 'isp_input_sigma[i]', the\n"
        "Gram matrix must be precomputed and cached in LinearARDKernel.");

    // Variables that walk over the data matrix
    int  cache_mod = m_data_cache.mod();
    real *data_start = &m_data_cache(0,0);
    real *xi = data_start+arg;               // Iterator on data rows

    // Variables that walk over the kernel derivative matrix (KD)
    KD.resize(l,l);
    real* KDi = KD.data();                   // Start of row i
    real* KDij;                              // Current element on row i
    int   KD_mod = KD.mod();

    // Iterate on rows of derivative matrix
    for (int i=0 ; i<l ; ++i, xi += cache_mod, KDi += KD_mod)
    {
        KDij = KDi;
        real *xj  = data_start+arg;          // Inner iterator on data rows

        // Iterate on columns of derivative matrix
        for (int j=0 ; j <= i ; ++j, xj += cache_mod)
        {
            // Set into derivative matrix
            *KDij++ = - signal_sigma * (*xi * *xj) * input_sigmoid / input_sigma_sq;
        }
    }
}


//#####  makeDeepCopyFromShallowCopy  #########################################

void LinearARDKernel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :