// -*- C++ -*-

// NeuralNetworkARDKernel.cc
//
// Copyright (C) 2007 Nicolas Chapados
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Nicolas Chapados

#include "NeuralNetworkARDKernel.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    NeuralNetworkARDKernel,
    "Neural network kernel that can be used for Automatic Relevance Determination",
    "This kernel is designed to be used within a GaussianProcessRegressor.  It\n"
    "is similar to the \"arcsin\" kernel of C.E. Rasmussen's GPML code (see\n"
    "http://www.gaussianprocess.org), but can be used with full Automatic\n"
    "Relevance Determination (ARD).  It takes the form:\n"
    "\n"
    "  k(x,y) = sf * asin(2*x*P*y / sqrt((1+2*x*P*x)*(1+2*y*P*y))) * k_kron(x,y)\n"
    "\n"
    "where sf is softplus(isp_signal_sigma), P is a diagonal matrix whose\n"
    "entries are softplus(isp_global_sigma + isp_input_sigma[i])^-2, and the x\n"
    "and y vectors on the right-hand side have an extra bias element (1.0)\n"
    "prepended.  (Note that if ARD is desired, the number of elements provided\n"
    "for isp_input_sigma must be ONE MORE than the number of inputs, and the\n"
    "first element of the isp_input_sigma vector corresponds to this bias.)\n"
    "Also note that, in keeping with Rasmussen and Williams, we raise these\n"
    "elements to the -2 power, so these hyperparameters can be interpreted as\n"
    "true length-scales.  The last factor k_kron(x,y) is the result of the\n"
    "KroneckerBaseKernel evaluation, or 1.0 if there are no Kronecker terms.\n"
    "Note that since the Kronecker terms are incorporated multiplicatively, the\n"
    "very presence of the term associated to this kernel can be gated by the\n"
    "value of some input variable(s) (that are incorporated within one or more\n"
    "Kronecker terms).\n"
    "\n"
    "See SquaredExponentialARDKernel for more information about using this\n"
    "kernel within a SummationKernel in order to add IID noise to the examples.\n"
    "\n"
    "Note that to make its operations more robust when used with unconstrained\n"
    "optimization of hyperparameters, all hyperparameters of this kernel are\n"
    "specified in the inverse softplus domain.  See IIDNoiseKernel for more\n"
    "explanations.\n"
    );
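
// Two remarks on the formula above, both of which follow from the code below:
//
// - The argument of asin() always lies in [-1, 1]: writing u = (1, x) and
//   v = (1, y) for the bias-augmented vectors, Cauchy-Schwarz gives
//   |2*u'P*v| <= sqrt((2*u'P*u) * (2*v'P*v)) <= sqrt((1 + 2*u'P*u) * (1 + 2*v'P*v)),
//   so the evaluation never leaves the domain of asin.
//
// - Hyperparameters are given in the inverse-softplus domain: a raw value of
//   0.0, for example, corresponds to a length-scale of softplus(0) = log(2)
//   ~= 0.693, and the kernel then uses 2 / 0.693^2 ~= 4.16 as the matching
//   diagonal entry of 2*P.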


NeuralNetworkARDKernel::NeuralNetworkARDKernel()
{ }


//#####  declareOptions  ######################################################

void NeuralNetworkARDKernel::declareOptions(OptionList& ol)
{
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}


//#####  build  ###############################################################

void NeuralNetworkARDKernel::build()
{
    // ### Nothing to add here, simply calls build_
    inherited::build();
    build_();
}


//#####  build_  ##############################################################

void NeuralNetworkARDKernel::build_()
{
    // Ensure that we multiply in Kronecker terms
    inherited::m_default_value = 1.0;
}


//#####  evaluate  ############################################################

real NeuralNetworkARDKernel::evaluate(const Vec& x1, const Vec& x2) const
{
    PLASSERT( x1.size() == x2.size() );
    PLASSERT( !m_isp_input_sigma.size() || x1.size()+1 == m_isp_input_sigma.size() );

    real gating_term = inherited::evaluate(x1,x2);
    if (fast_is_equal(gating_term, 0.0) || x1.size() == 0)
        return 0.0;

    const real* px1 = x1.data();
    const real* px2 = x2.data();
    real sf         = softplus(m_isp_signal_sigma);
    real dot_x1_x1;
    real dot_x2_x2;
    real dot_x1_x2;

    if (m_isp_input_sigma.size() > 0) {
        const real* pinpsig = m_isp_input_sigma.data();
        real sigma = softplus(*pinpsig++);
        sigma *= sigma;
        sigma  = 2. / sigma;

        // Handle bias
        dot_x1_x1 = dot_x2_x2 = dot_x1_x2 = sigma;

        for (int i=0, n=x1.size() ; i<n ; ++i, ++px1, ++px2) {
            sigma  = softplus(*pinpsig++);
            sigma *= sigma;
            sigma  = 2. / sigma;

            dot_x1_x2 += *px1 * *px2 * sigma;
            dot_x1_x1 += *px1 * *px1 * sigma;
            dot_x2_x2 += *px2 * *px2 * sigma;
        }
    }
    else {
        real global_sigma = softplus(m_isp_global_sigma);
        global_sigma *= global_sigma;
        global_sigma  = 2. / global_sigma;

        // Handle bias for x1 and x2
        dot_x1_x1 = dot_x2_x2 = dot_x1_x2 = 1;

        for (int i=0, n=x1.size() ; i<n ; ++i, ++px1, ++px2) {
            dot_x1_x2 += *px1 * *px2;
            dot_x1_x1 += *px1 * *px1;
            dot_x2_x2 += *px2 * *px2;
        }
        dot_x1_x2 *= global_sigma;
        dot_x1_x1 *= global_sigma;
        dot_x2_x2 *= global_sigma;
    }

    // Gate by Kronecker term
    return sf * asin(dot_x1_x2 / sqrt((1 + dot_x1_x1) * (1 + dot_x2_x2))) * gating_term;
}
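
// A quick sanity check on evaluate(): with a single input dimension, no
// per-input sigmas, isp_global_sigma = 0 and x1 = x2 = 0, all three dot
// products equal g = 2 / softplus(0)^2 ~= 4.16, so the kernel value is
// sf * asin(g / (1 + g)) ~= 0.94 * sf (times the Kronecker gating term).
//
// Note that in the ARD branch above the per-dimension scale is
// softplus(m_isp_input_sigma[i]), whereas computeGramMatrix() below uses
// softplus(m_isp_global_sigma + m_isp_input_sigma[i]); the two code paths
// therefore coincide only when m_isp_global_sigma is zero.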


//#####  computeGramMatrix  ###################################################

#define DUFF_DOTLOOP                            \
        sigma = *p_inpsigma++;                  \
        dot_x1_x2 += *x1 * *x2 * sigma;         \
        dot_x1_x1 += *x1 * *x1 * sigma;         \
        dot_x2_x2 += *x2 * *x2 * sigma;         \
        ++x1;                                   \
        ++x2;
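
// DUFF_DOTLOOP is used below in a Duff's-device unrolling of the plain
// per-dimension accumulation loop:
//
//     for (int d = 0; d < n; ++d) {
//         sigma = *p_inpsigma++;
//         dot_x1_x2 += *x1 * *x2 * sigma;
//         dot_x1_x1 += *x1 * *x1 * sigma;
//         dot_x2_x2 += *x2 * *x2 * sigma;
//         ++x1;
//         ++x2;
//     }
//
// The switch on (k % 8) jumps into the middle of the unrolled body so that the
// first, partial pass performs k % 8 iterations (or eight when k is a multiple
// of eight) and every subsequent pass performs exactly eight; as written it
// assumes the number of inputs n is positive.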

void NeuralNetworkARDKernel::computeGramMatrix(Mat K) const
{
    PLASSERT( !m_isp_input_sigma.size() || dataInputsize()+1 == m_isp_input_sigma.size() );
    PLASSERT( K.size() == 0 || m_data_cache.size() > 0 );  // Ensure data cached OK

    // Compute Kronecker gram matrix
    inherited::computeGramMatrix(K);

    // Precompute some terms. Make sure that the input sigmas don't get too
    // small
    real sf = softplus(m_isp_signal_sigma);
    m_input_sigma.resize(dataInputsize() + 1);
    softplusFloor(m_isp_global_sigma, 1e-6);
    m_input_sigma.fill(m_isp_global_sigma);  // Still in ISP domain
    for (int i=0, n=m_input_sigma.size() ; i<n ; ++i) {
        if (m_isp_input_sigma.size() > 0) {
            softplusFloor(m_isp_input_sigma[i], 1e-6);
            m_input_sigma[i] += m_isp_input_sigma[i];
        }
        m_input_sigma[i]  = softplus(m_input_sigma[i]);
        m_input_sigma[i] *= m_input_sigma[i];
        m_input_sigma[i]  = 2. / m_input_sigma[i];
    }
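
    // At this point m_input_sigma[i] no longer holds a value in the ISP
    // domain but the precomputed weight 2 / softplus(...)^2, i.e. the i-th
    // diagonal entry of 2*P in the class-level formula (entry 0 being the
    // bias weight).  For instance, an ISP value of 0 yields
    // 2 / log(2)^2 ~= 4.16.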

    // Compute Gram Matrix
    int  l = data->length();
    int  m = K.mod();
    int  n = dataInputsize();
    int  cache_mod = m_data_cache.mod();

    real *data_start = &m_data_cache(0,0);
    real *Ki = K[0];                         // Start of current row
    real *Kij;                               // Current element along row
    real *input_sigma_data = m_input_sigma.data();
    real *xi = data_start;

    for (int i=0 ; i<l ; ++i, xi += cache_mod, Ki+=m)
    {
        Kij = Ki;
        real *xj = data_start;

        for (int j=0; j<=i; ++j, xj += cache_mod) {
            // Kernel evaluation per se
            real *x1 = xi;
            real *x2 = xj;
            real *p_inpsigma = input_sigma_data;
            int  k = n;

            // Handle the bias for x1 and x2
            real sigma     = *p_inpsigma++;
            real dot_x1_x1 = sigma;
            real dot_x2_x2 = sigma;
            real dot_x1_x2 = sigma;

            switch (k % 8) {
            case 0: do {  DUFF_DOTLOOP
            case 7:       DUFF_DOTLOOP
            case 6:       DUFF_DOTLOOP
            case 5:       DUFF_DOTLOOP
            case 4:       DUFF_DOTLOOP
            case 3:       DUFF_DOTLOOP
            case 2:       DUFF_DOTLOOP
            case 1:       DUFF_DOTLOOP  } while((k -= 8) > 0);
            }

            // Multiplicatively update kernel matrix (already pre-filled with
            // Kronecker terms, or 1.0 if no Kronecker terms, as per build_).
            real Kij_cur = *Kij * sf * asin(dot_x1_x2 / sqrt((1 + dot_x1_x1) * (1 + dot_x2_x2)));
            *Kij++ = Kij_cur;
        }
    }
    if (cache_gram_matrix) {
        gram_matrix.resize(l,l);
        gram_matrix << K;
        gram_matrix_is_cached = true;
    }
}


//#####  computeGramMatrixDerivative  #########################################

void NeuralNetworkARDKernel::computeGramMatrixDerivative(
    Mat& KD, const string& kernel_param, real epsilon) const
{
    static const string ISS("isp_signal_sigma");
    static const string IGS("isp_global_sigma");
    static const string IIS("isp_input_sigma[");

    if (kernel_param == ISS) {
        computeGramMatrixDerivIspSignalSigma(KD);
    }
    // else if (kernel_param == IGS) {
    //     computeGramMatrixDerivNV<
    //         NeuralNetworkARDKernel,
    //         &NeuralNetworkARDKernel::derivIspGlobalSigma>(KD, this, -1);
    // }
    // else if (string_begins_with(kernel_param, IIS) &&
    //          kernel_param[kernel_param.size()-1] == ']')
    // {
    //     int arg = tolong(kernel_param.substr(
    //                          IIS.size(), kernel_param.size() - IIS.size() - 1));
    //     PLASSERT( arg < m_isp_input_sigma.size() );
    //
    //     computeGramMatrixDerivIspInputSigma(KD, arg);
    //
    // }
    else
        inherited::computeGramMatrixDerivative(KD, kernel_param, epsilon);
}
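
// Only the derivative with respect to isp_signal_sigma is handled analytically
// here; the isp_global_sigma and isp_input_sigma[i] branches are commented out
// above, so those hyperparameters fall through to
// inherited::computeGramMatrixDerivative(), which presumably approximates the
// derivative numerically using the 'epsilon' perturbation argument.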


//#####  evaluate_all_i_x  ####################################################

void NeuralNetworkARDKernel::evaluate_all_i_x(const Vec& x, const Vec& k_xi_x,
                                              real squared_norm_of_x, int istart) const
{
    evaluateAllIXNV<NeuralNetworkARDKernel>(x, k_xi_x, istart);
}


//#####  derivIspGlobalSigma  #################################################

real NeuralNetworkARDKernel::derivIspGlobalSigma(int i, int j, int arg, real K) const
{
    if (fast_is_equal(K,0.))
        return 0.;

    // The norm term inside the exponential may be accessed as Log(K/sf)
    real inner = pl_log(K / softplus(m_isp_signal_sigma));
    return - K * inner * sigmoid(m_isp_global_sigma) / softplus(m_isp_global_sigma);

    // Note: in the above expression for 'inner' there is the implicit
    // assumption that the input_sigma[i] are zero, which allows the
    // sigmoid/softplus term to be factored out of the norm summation.
}


//#####  computeGramMatrixDerivIspSignalSigma  ################################

void NeuralNetworkARDKernel::computeGramMatrixDerivIspSignalSigma(Mat& KD) const
{
    int l = data->length();
    KD.resize(l,l);
    PLASSERT_MSG(
        gram_matrix.width() == l && gram_matrix.length() == l,
        "To compute the derivative with respect to 'isp_signal_sigma', the\n"
        "Gram matrix must be precomputed and cached in NeuralNetworkARDKernel.");

    KD << gram_matrix;
    KD *= sigmoid(m_isp_signal_sigma)/softplus(m_isp_signal_sigma);
}
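
// The scaling above follows from the chain rule: every cached Gram-matrix
// entry is proportional to sf = softplus(isp_signal_sigma), and
// d/dx softplus(x) = sigmoid(x), hence
// dK/d(isp_signal_sigma) = K * sigmoid(isp_signal_sigma) / softplus(isp_signal_sigma).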


//#####  computeGramMatrixDerivIspInputSigma  #################################

void NeuralNetworkARDKernel::computeGramMatrixDerivIspInputSigma(Mat& KD,
                                                                 int arg) const
{
    // Precompute some terms
    real input_sigma_arg = m_input_sigma[arg];
    real input_sigma_sq  = input_sigma_arg * input_sigma_arg;
    real input_sigmoid   = sigmoid(m_isp_global_sigma + m_isp_input_sigma[arg]);

    // Compute Gram Matrix derivative w.r.t. isp_input_sigma[arg]
    int  l = data->length();
    PLASSERT_MSG(
        gram_matrix.width() == l && gram_matrix.length() == l,
        "To compute the derivative with respect to 'isp_input_sigma[i]', the\n"
        "Gram matrix must be precomputed and cached in NeuralNetworkARDKernel.");

    // Variables that walk over the data matrix
    int  cache_mod = m_data_cache.mod();
    real *data_start = &m_data_cache(0,0);
    real *xi = data_start+arg;               // Iterator on data rows

    // Variables that walk over the gram cache
    int   gram_cache_mod = gram_matrix.mod();
    real *gram_cache_row = gram_matrix.data();
    real *gram_cache_cur;

    // Variables that walk over the kernel derivative matrix (KD)
    KD.resize(l,l);
    real* KDi = KD.data();                   // Start of row i
    real* KDij;                              // Current element on row i
    int   KD_mod = KD.mod();

    // Iterate on rows of derivative matrix
    for (int i=0 ; i<l ; ++i, xi += cache_mod, KDi += KD_mod,
             gram_cache_row += gram_cache_mod)
    {
        KDij = KDi;
        real *xj  = data_start+arg;           // Inner iterator on data rows
        gram_cache_cur = gram_cache_row;

        // Iterate on columns of derivative matrix
        for (int j=0 ; j <= i
                 ; ++j, xj += cache_mod, ++gram_cache_cur)
        {
            real diff    = *xi - *xj;
            real sq_diff = diff * diff;
            real KD_cur  = 0.5 * *gram_cache_cur *
                           input_sigmoid * sq_diff / input_sigma_sq;

            // Set into derivative matrix
            *KDij++ = KD_cur;
        }
    }
}
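
// Note that this derivative appears to carry over the squared-difference form
// of SquaredExponentialARDKernel rather than one derived from the asin
// expression above; it is currently unreachable in any case, since the
// corresponding branch of computeGramMatrixDerivative() is commented out.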


//#####  makeDeepCopyFromShallowCopy  #########################################

void NeuralNetworkARDKernel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :