GaussianKernel.cc
// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: GaussianKernel.cc 7285 2007-05-24 14:12:19Z plearner $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "GaussianKernel.h"
#include <plearn/math/TMat_maths.h>

//#define GK_DEBUG

namespace PLearn {
using namespace std;

// ** GaussianKernel **

PLEARN_IMPLEMENT_OBJECT(GaussianKernel,
                        "The good old Gaussian kernel: K(x,y) = exp(-||x-y||^2 / sigma^2).",
                        "Note that this is not the proper normal density (but it has the same 'shape').\n"
                        "In particular, it is not properly normalized. If you want the usual, properly\n"
                        "normalized Gaussian density, consider using GaussianDensityKernel instead.");
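
// Usage sketch (illustrative only; the element-wise Vec initialization below
// is just one possible idiom, while the constructor and evaluate() calls match
// the definitions in this file):
//
//     GaussianKernel K(2.0);            // kernel with sigma = 2
//     Vec x(3), y(3);                   // two 3-dimensional inputs
//     x[0] = 1; x[1] = 0; x[2] = 0;
//     y[0] = 0; y[1] = 1; y[2] = 0;
//     real k_xy = K.evaluate(x, y);     // exp(-||x-y||^2 / sigma^2) = exp(-2/4)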

////////////////////
// GaussianKernel //
////////////////////
GaussianKernel::GaussianKernel()
    : scale_by_sigma(false),
      sigma(1)
{
    build_();
}

GaussianKernel::GaussianKernel(real the_sigma)
    : scale_by_sigma(false),
      sigma(the_sigma)
{
    build_();
}

////////////////////
// declareOptions //
////////////////////
void GaussianKernel::declareOptions(OptionList& ol)
{
    declareOption(ol, "sigma", &GaussianKernel::sigma, OptionBase::buildoption,
                  "The width of the Gaussian.");

    declareOption(ol, "scale_by_sigma", &GaussianKernel::scale_by_sigma, OptionBase::buildoption,
                  "If set to 1, the kernel will be scaled by sigma^2 / 2");

    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void GaussianKernel::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void GaussianKernel::build_()
{
    minus_one_over_sigmasquare = -1.0/square(sigma);
    sigmasquare_over_two = square(sigma) / 2.0;
}
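
// Note: build_() above caches the two sigma-dependent constants used by
// evaluateFromSquaredNormOfDifference(): -1/sigma^2 (the factor inside the
// exponential) and sigma^2/2 (the optional scale_by_sigma factor), so they
// are recomputed only when sigma changes.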


void GaussianKernel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(squarednorms,copies);
}


void GaussianKernel::addDataForKernelMatrix(const Vec& newRow)
{
    inherited::addDataForKernelMatrix(newRow);

    int dlen  = data.length();
    int sqlen = squarednorms.length();
    if(sqlen == dlen-1)
        squarednorms.resize(dlen);
    else if(sqlen == dlen)
        for(int s=1; s < sqlen; s++)
            squarednorms[s-1] = squarednorms[s];
    else
        PLERROR("Only two scenarios are managed:\n"
                "Either the new row was simply appended to the data matrix or, under the windowed setting,\n"
                "newRow is the new last row and the other rows were shifted back by one.\n"
                "However, sqlen = %d and dlen = %d exclude both!", sqlen, dlen);

    squarednorms.lastElement() = pownorm(newRow, 2);
}
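
// The two cache updates above mirror the two supported data updates:
//   append: data grew by one row, so squarednorms grows from dlen-1 to dlen;
//   window: data kept its length, the oldest row was dropped and newRow became
//           the last row, so the cached squared norms are shifted left by one.
// In both cases the last entry is then set to ||newRow||^2.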

/////////////////////////////////////////////
// evaluateFromSquaredNormOfDifference //
/////////////////////////////////////////////
real GaussianKernel::evaluateFromSquaredNormOfDifference(real sqnorm_of_diff) const
{
    if (sqnorm_of_diff < 0) {
        if (sqnorm_of_diff * minus_one_over_sigmasquare < 1e-10 )
            // This can still happen when computing K(x,x), because of numerical
            // approximations.
            sqnorm_of_diff = 0;
        else {
            // This should not happen (anymore) with the isUnsafe check.
            // You may comment out the PLERROR below if you want to continue your
            // computations, but then you should investigate why this happens.
            PLERROR("In GaussianKernel::evaluateFromSquaredNormOfDifference - The given "
                    "'sqnorm_of_diff' is negative (%f)", sqnorm_of_diff);
            sqnorm_of_diff = 0;
        }
    }
    if (scale_by_sigma) {
        return exp(sqnorm_of_diff*minus_one_over_sigmasquare) * sigmasquare_over_two;
    } else {
        return exp(sqnorm_of_diff*minus_one_over_sigmasquare);
    }
}

real GaussianKernel::evaluateFromDotAndSquaredNorm(real sqnorm_x1, real dot_x1_x2, real sqnorm_x2) const
{
    return evaluateFromSquaredNormOfDifference((sqnorm_x1+sqnorm_x2)-(dot_x1_x2+dot_x1_x2));
}
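
// evaluateFromDotAndSquaredNorm relies on the identity
//     ||x1 - x2||^2 = ||x1||^2 - 2*<x1,x2> + ||x2||^2,
// which lets evaluate_i_j / evaluate_i_x / evaluate_x_i below reuse cached
// squared norms plus a single dot product instead of forming x1 - x2
// explicitly.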



//////////////
// evaluate //
//////////////
real GaussianKernel::evaluate(const Vec& x1, const Vec& x2) const
{
#ifdef BOUNDCHECK
    if(x1.length()!=x2.length())
        PLERROR("In GaussianKernel::evaluate, x1 and x2 must have the same length");
#endif
    int l = x1.length();
    real* px1 = x1.data();
    real* px2 = x2.data();
    real sqnorm_of_diff = 0.;
    for(int i=0; i<l; i++)
    {
        real val = px1[i]-px2[i];
        sqnorm_of_diff += val*val;
    }
    return evaluateFromSquaredNormOfDifference(sqnorm_of_diff);
}


//////////////////
// evaluate_i_j //
//////////////////
real GaussianKernel::evaluate_i_j(int i, int j) const
{
#ifdef GK_DEBUG
    if(i==0 && j==1){
        cout << "*** i==0 && j==1 ***" << endl;
        cout << "data(" << i << "): " << data(i) << endl << endl;
        cout << "data(" << j << "): " << data(j) << endl << endl;

        real sqnorm_i = pownorm((Vec)data(i), 2);
        if(sqnorm_i != squarednorms[i])
            PLERROR("%f = sqnorm_i != squarednorms[%d] = %f", sqnorm_i, i, squarednorms[i]);

        real sqnorm_j = pownorm((Vec)data(j), 2);
        if(sqnorm_j != squarednorms[j])
            PLERROR("%f = sqnorm_j != squarednorms[%d] = %f", sqnorm_j, j, squarednorms[j]);
    }
#endif
    real sqn_i = squarednorms[i];
    real sqn_j = squarednorms[j];
    if (isUnsafe(sqn_i, sqn_j))
        return inherited::evaluate_i_j(i,j);
    else
        return evaluateFromDotAndSquaredNorm(sqn_i, data->dot(i,j,data_inputsize), sqn_j);
}

//////////////////
// evaluate_i_x //
//////////////////
real GaussianKernel::evaluate_i_x(int i, const Vec& x, real squared_norm_of_x) const
{
    if(squared_norm_of_x<0.)
        squared_norm_of_x = pownorm(x);

#ifdef GK_DEBUG
//   real dot_x1_x2 = data->dot(i,x);
//   cout << "data.row(" << i << "): " << data.row(i) << endl
//        << "squarednorms[" << i << "]: " << squarednorms[i] << endl
//        << "data->dot(i,x): " << dot_x1_x2 << endl
//        << "x: " << x << endl
//        << "squared_norm_of_x: " << squared_norm_of_x << endl;
//   real sqnorm_of_diff = (squarednorms[i]+squared_norm_of_x)-(dot_x1_x2+dot_x1_x2);
//   cout << "a-> sqnorm_of_diff: " << sqnorm_of_diff << endl
//        << "b-> minus_one_over_sigmasquare: " << minus_one_over_sigmasquare << endl
//        << "a*b: " << sqnorm_of_diff*minus_one_over_sigmasquare << endl
//        << "res: " << exp(sqnorm_of_diff*minus_one_over_sigmasquare) << endl;
#endif
    real sqn_i = squarednorms[i];
    if (isUnsafe(sqn_i, squared_norm_of_x))
        return inherited::evaluate_i_x(i, x, squared_norm_of_x);
    else
        return evaluateFromDotAndSquaredNorm(sqn_i, data->dot(i,x), squared_norm_of_x);
}


//////////////////
// evaluate_x_i //
//////////////////
real GaussianKernel::evaluate_x_i(const Vec& x, int i, real squared_norm_of_x) const
{
    if(squared_norm_of_x<0.)
        squared_norm_of_x = pownorm(x);
    real sqn_i = squarednorms[i];
    if (isUnsafe(sqn_i, squared_norm_of_x))
        return inherited::evaluate_x_i(x, i, squared_norm_of_x);
    else
        return evaluateFromDotAndSquaredNorm(squared_norm_of_x, data->dot(i,x), sqn_i);
}

//////////////
// isUnsafe //
//////////////
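// When both squared norms are very large and within about 1% of each other,
// the expansion ||x1||^2 - 2*<x1,x2> + ||x2||^2 subtracts nearly equal large
// numbers and can lose most of its precision (the result may even come out
// negative). The callers above then fall back to the inherited evaluate_*
// methods, which form the difference vector explicitly.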
bool GaussianKernel::isUnsafe(real sqn_1, real sqn_2) const {
    return (sqn_1 > 1e6 && fabs(sqn_2 / sqn_1 - 1.0) < 1e-2);
}

////////////////////////////
// setDataForKernelMatrix //
////////////////////////////
void GaussianKernel::setDataForKernelMatrix(VMat the_data)
{
    inherited::setDataForKernelMatrix(the_data);
    build_();                                // Update sigma computation cache
    squarednorms.resize(data.length());
    for(int index=0; index<data.length(); index++)
        squarednorms[index] = data->dot(index,index, data_inputsize);
}

///////////////////
// setParameters //
///////////////////
void GaussianKernel::setParameters(Vec paramvec)
{
    PLWARNING("In GaussianKernel: setParameters is deprecated, use setOption instead");
    sigma = paramvec[0];
    build_();                                // Update sigma computation cache
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :