// -*- C++ -*-

// distr_maths.cc
// Copyright (C) 2002 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
   * $Id: distr_maths.cc 8235 2007-11-07 21:32:01Z nouiz $
   * This file is part of the PLearn library.
   ******************************************************* */


#include "distr_maths.h"
#include "random.h"
#include "TMat_maths.h"
#include "pl_erf.h"
#include "plapack.h"

namespace PLearn {
using namespace std;

/*
  // *****************************
  // * Using eigen decomposition *
  // *****************************

  X is the input matrix.
  Let C be the covariance of X:  C = (X-mu)'.(X-mu)

  The eigendecomposition:
  C = VDV'    where the *columns* of V are the orthonormal eigenvectors and D is a diagonal matrix with the eigenvalues lambda_1 ... lambda_d

  det(C) = det(D) = product of the eigenvalues
  log(det(C)) = \sum_{i=1}^d [ log(lambda_i) ]

  inv(C) = V.inv(D).V'    (where inv(D) is a diagonal with the inverse of each eigenvalue)

  For a gaussian where C is the covariance matrix, mu is the mean (column vector), and x is a column vector, we have
  gaussian(x; mu,C) = 1/sqrt((2PI)^d * det(C)) exp( -0.5 (x-mu)'.inv(C).(x-mu) )
                    = 1/sqrt((2PI)^d * det(D)) exp( -0.5 (V'(x-mu))'.inv(D).(V'(x-mu)) )
                    = exp [ -0.5( d*log(2PI) + log(det(D)) )  -0.5(V'(x-mu))'.inv(D).(V'(x-mu)) ]
                             \_______________  ____________/       \____________  ____________/
                                             \/                                 \/
                                           logcoef                              q

  The expression q = (V'(x-mu))'.inv(D).(V'(x-mu)) can be understood as:
     a) projecting vector x-mu on the orthonormal basis V,
        i.e. obtaining a transformed x that we shall call y:  y = V'(x-mu)
        (y corresponds to x, expressed in the coordinate system V)
        y_i = V'_i.(x-mu)

     b) computing the squared norm of y, after first rescaling each coordinate by a factor 1/sqrt(lambda_i)
        (i.e. differences in the directions with large lambda_i are given less importance)
        Giving  q = \sum_i [ 1/lambda_i y_i^2 ]

  If we only keep the first k eigenvalues, and replace the remaining d-k ones by the same value gamma,
  i.e.  lambda_{k+1} = ... = lambda_d = gamma

  Then q can be expressed as:
    q = \sum_{i=1}^k [ 1/lambda_i y_i^2 ]   +   1/gamma \sum_{i=k+1}^d [ y_i^2 ]

  But, as y is just x expressed in another orthonormal basis, we have |y|^2 = |x-mu|^2
  ( proof: |y|^2 = |V'(x-mu)|^2 = (V'(x-mu))'.(V'(x-mu)) = (x-mu)'.V.V'.(x-mu) = (x-mu)'(x-mu) = |x-mu|^2 )

  Thus, we know  \sum_{i=1}^d [ y_i^2 ] = |x-mu|^2
  Thus \sum_{i=k+1}^d [ y_i^2 ] = |x-mu|^2 - \sum_{i=1}^k [ y_i^2 ]

  Consequently:
    q = \sum_{i=1}^k [ 1/lambda_i y_i^2 ]   +  1/gamma ( |x-mu|^2 - \sum_{i=1}^k [ y_i^2 ] )

    q = \sum_{i=1}^k [ (1/lambda_i - 1/gamma) y_i^2 ]  +  1/gamma  |x-mu|^2

    q = \sum_{i=1}^k [ (1/lambda_i - 1/gamma) (V'_i.(x-mu))^2 ]  +  1/gamma  |x-mu|^2

    This gives the efficient algorithm implemented below.

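  Worked check of the last identity (made-up numbers, for illustration only):
  take d=2, k=1, V the identity basis, lambda_1 = 4, gamma = 1, x-mu = (1,2)'.
    direct form:   q = (1/4)*1^2 + (1/1)*2^2               = 4.25
    compact form:  q = (1/4 - 1/1)*1^2 + (1/1)*(1^2 + 2^2) = -0.75 + 5 = 4.25
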
 --------------------------------------------------------------------------------------

 Other possibility: direct computation:

 Let's note X~ = X-mu

 We have already seen that
  for a gaussian where C is the covariance matrix, mu is the mean (column vector), and x is a column vector, we have
  gaussian(x; mu,C) = 1/sqrt((2PI)^d * det(C)) exp( -0.5 (x-mu)'.inv(C).(x-mu) )
                    = 1/sqrt((2PI)^d * det(D)) exp( -0.5 (V'(x-mu))'.inv(D).(V'(x-mu)) )
                    = exp [ -0.5( d*log(2PI) + log(det(D)) )  -0.5 (x-mu)'.inv(C).(x-mu) ]
                             \_______________  ____________/       \_________  ________/
                                             \/                              \/
                                           logcoef                           q
  Let z = inv(C).(x-mu)
  ==> z is the solution of C.z = x-mu
  And then we have q = (x-mu)'.z

  So computing q is simply a matter of solving this linear system for z,
  and then computing the dot product q = (x-mu)'.z.

  In my old paper we had to solve for alpha, in the old notation:
    " (V'V + lambda.I) alpha = V' (x-N~) "
  Which in our current notation corresponds to:
     (C + lambda.I) alpha = X~' x~
   If we drop the + lambda.I for now:
      alpha = inv(C) X~' x~
   and the "hyperplane distance" is then given by
     hd = sqnorm(x~ - X~.alpha)
        = sqnorm(x~ - X~.inv(C).X~'.x~)
        = sqnorm(x~ - X~.inv(X~'.X~).X~'.x~)


*/



real logOfCompactGaussian(const Vec& x, const Vec& mu,
                          const Vec& eigenvalues, const Mat& eigenvectors,
                          real gamma, bool add_gamma_to_eigenval)
{
    // cerr << "logOfCompact: mu = " << mu << endl;

    int i;
    int d = x.length();
    static Vec x_minus_mu;
    x_minus_mu.resize(d);
    real* px = x.data();
    real* pmu = mu.data();
    real* pxmu = x_minus_mu.data();

    real sqnorm_xmu = 0;
    for(i=0; i<d; i++)
    {
        real val = *px++ - *pmu++;
        sqnorm_xmu += val*val;
        *pxmu++ = val;
    }

    double log_det = 0.;
    double inv_gamma = 0;
    if(gamma>0)
        inv_gamma = 1./gamma;
    int kk = eigenvalues.length();
    double q = inv_gamma * sqnorm_xmu;
    for(i=0; i<kk; i++)
    {
        double lambda_i = eigenvalues[i];
        if(add_gamma_to_eigenval)
            lambda_i += gamma;
        if(lambda_i<=0) // we've reached a 0 eigenvalue, stop computation here.
            break;
        log_det += pl_log(lambda_i);
        q += (1./lambda_i - inv_gamma)*square(dot(eigenvectors(i),x_minus_mu));
    }
    if(kk<d) // the d-i remaining directions all have eigenvalue gamma
        log_det += pl_log(gamma)*(d-i);

    static real log_2pi = pl_log(2*M_PI);
    double logp = -0.5*( d*log_2pi + log_det + q);
    // cerr << "logOfCompactGaussian q=" << q << " log_det=" << log_det << " logp=" << logp << endl;
    // exit(0);
    return real(logp);
}
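
// Illustrative usage sketch (not part of the original file; the values are
// made up): evaluate the log-density of a 2D Gaussian, keeping one leading
// eigen-direction and an isotropic remainder gamma in the other directions.
/*
    Vec x(2), mu(2);
    x[0] = 1.0;  x[1] = 2.0;            // query point
    mu[0] = 0.0; mu[1] = 0.0;           // mean
    Mat evectors(1,2);                  // eigenvectors in the ROWS
    evectors(0,0) = 1.0; evectors(0,1) = 0.0;
    Vec evalues(1);
    evalues[0] = 4.0;                   // leading eigenvalue
    real gamma = 1.0;                   // variance of all remaining directions
    real logp = logOfCompactGaussian(x, mu, evalues, evectors, gamma, false);
*/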

real logOfNormal(const Vec& x, const Vec& mu, const Mat& C)
{
    int n = x.length();
    static Vec x_mu;
    static Vec z;
    static Vec y;
    y.resize(n);
    z.resize(n);
    x_mu.resize(n);
    substract(x,mu,x_mu);

    static Mat L;
    // Perform Cholesky decomposition
    choleskyDecomposition(C, L);

    // get the log of the determinant:
    // det(C) = square(product_i L_ii), so log(det(C)) = 2 * sum_i log(L_ii)
    double logdet = 0;
    for(int i=0; i<n; i++)
        logdet += pl_log(L(i,i));
    logdet += logdet;

    // Finally find z, such that C.z = x-mu
    choleskySolve(L, x_mu, z, y);

    double q = dot(x_mu, z);
    double logp = -0.5*( n*pl_log(2*M_PI) + logdet + q);
    // cerr << "logOfNormal q=" << q << " logdet=" << logdet << " logp=" << logp << endl;
    return real(logp);
}
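
// Consistency sketch (illustrative only): when all n eigenvalues are kept and
// gamma is 0, logOfCompactGaussian reduces to the exact full Gaussian, so it
// should match logOfNormal up to numerical error. 'evals'/'evecs' are local
// names for this sketch; eigenVecOfSymmMat is assumed to resize its outputs.
/*
    Mat C2 = C.copy();    // eigenVecOfSymmMat may overwrite its input matrix
    Vec evals;
    Mat evecs;
    eigenVecOfSymmMat(C2, n, evals, evecs, false);
    real lp1 = logOfNormal(x, mu, C);
    real lp2 = logOfCompactGaussian(x, mu, evals, evecs, 0, false);
    // lp1 ~= lp2
*/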

real logPFittedGaussian(const Vec& x, const Mat& X, real lambda)
{
    static Mat C;
    static Vec mu;
    computeMeanAndCovar(X, mu, C);
    addToDiagonal(C, lambda);
    return logOfNormal(x, mu, C);
}
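
// Usage sketch (illustrative; the dimensions are made up): score a test point
// under a Gaussian fitted to the rows of a data matrix, with a small ridge
// term lambda added to the diagonal of the covariance for numerical stability.
/*
    Mat X(100, 5);     // 100 samples in dimension 5, filled elsewhere
    Vec x(5);          // test point
    real lp = logPFittedGaussian(x, X, 1e-6);
*/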

real beta_density(real x, real alpha, real beta)
{
    return exp(log_beta_density(x,alpha,beta));
}

real log_beta_density(real x, real alpha, real beta)
{
    return (alpha-1)*safelog(x) + (beta-1)*safelog(1-x) - log_beta(alpha,beta);
}
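
// Quick check (illustrative): Beta(2,2) has density 6*x*(1-x), so at x=0.5
// the density is 1.5 and log_beta_density(0.5, 2, 2) should return
// log(1.5) ~= 0.4055.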

// return log of Normal(x;mu, sigma2*I), i.e. density of a spherical Gaussian
real log_of_normal_density(Vec x, Vec mu, real sigma2)
{
    real lp=0;
    for (int i=0;i<x.length();i++)
        lp += gauss_log_density_var(x[i],mu[i],sigma2);
    return lp;
}

// return the log of the unnormalized spherical RBF kernel
// exp(-|x-mu|^2/(2*sigma2)), i.e. the quadratic term of the spherical
// Gaussian log-density, without the normalization constant
real log_rbf(Vec x, Vec mu, real sigma2)
{
    real lp=0;
    real inv_s=1.0/sigma2;
    for (int i=0;i<x.length();i++)
    {
        real diff = x[i]-mu[i];
        lp += -0.5*diff*diff*inv_s;
    }
    return lp;
}
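
// Relation worth noting (illustrative): log_rbf differs from the spherical
// log_of_normal_density above only by the normalization constant, i.e.
//   log_rbf(x,mu,sigma2) = log_of_normal_density(x,mu,sigma2)
//                          + 0.5 * d * log(2*pi*sigma2)
// where d = x.length().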


// return log of Normal(x;mu, Sigma), i.e. density of a full Gaussian,
// where the covariance Sigma is
//    Sigma = remainder_evalue*I + sum_i max(0,evalues[i]-remainder_evalue)*evectors(i)*evectors(i)'
// The eigenvectors are in the ROWS of matrix evectors (because of easier row-wise access in Mat's).
real log_of_normal_density(Vec x, Vec mu, Mat evectors, Vec evalues, real remainder_evalue)
{
    return logOfCompactGaussian(x,mu,evalues,evectors,remainder_evalue,false);
/*  Previous direct implementation, kept for reference:
    static Vec centered_x;
    int d=x.length();
    centered_x.resize(d);
    int k=evectors.length();
    real lp = -0.5 * d * Log2Pi;
    real irev = 0;
    substract(x,mu,centered_x);
    if (remainder_evalue>0)
    {
        irev = 1 / remainder_evalue;
        lp -= 0.5 * ( (d-k)*pl_log(remainder_evalue)  +  pownorm(centered_x) * irev );
        if (k>=d)
            PLERROR("log_of_normal_density: when remainder_evalue>0, there should be fewer e-vectors (%d) than dimensions (%d)",
                    k,d);
    }
    for (int i=0;i<k;i++)
    {
        real ev = evalues[i];
        if (ev<remainder_evalue)
            lp -= 0.5 * pl_log(ev);
        else
        {
            real iv = 1/ev - irev;
            lp -= 0.5 * ( pl_log(ev) + iv * square(dot(evectors(i),centered_x)));
        }
    }
    return lp;
*/
}

// return the log of the unnormalized Gaussian RBF with covariance Sigma, i.e.
// only the quadratic term -0.5 (x-mu)'.inv(Sigma).(x-mu), without the
// normalization constant, where the covariance Sigma is
//    Sigma = remainder_evalue*I + sum_i max(0,evalues[i]-remainder_evalue)*evectors(i)*evectors(i)'
// The eigenvectors are in the ROWS of matrix evectors (because of easier row-wise access in Mat's).
real log_fullGaussianRBF(Vec x, Vec mu, Mat evectors, Vec evalues, real remainder_evalue)
{
    static Vec centered_x;
    int d=x.length();
    centered_x.resize(d);
    int k=evectors.length();
    real lp = 0;
    real irev = 0;
    substract(x,mu,centered_x);
    if (remainder_evalue>0)
    {
        irev = 1 / remainder_evalue;
        lp -= 0.5 * pownorm(centered_x) * irev;
        if (k>=d)
            PLERROR("log_fullGaussianRBF: when remainder_evalue>0, there should be fewer e-vectors (%d) than dimensions (%d)",
                    k,d);
    }
    for (int i=0;i<k;i++)
    {
        real ev = evalues[i];
        if (ev>=remainder_evalue)
        {
            real iv = 1/ev - irev;
            lp -= 0.5 * iv * square(dot(evectors(i),centered_x));
        }
    }
    return lp;
}

// Given the eigendecompositions of two symmetric matrices A and B (with
// eigenvectors in the ROWS of A_evec and B_evec), compute the
// eigendecomposition of C = A + B. If 'inverses' is true, the sum is taken
// over the inverses instead, i.e. C is such that inv(C) = inv(A) + inv(B)
// (with the convention that zero eigenvalues are left at zero).
void addEigenMatrices(Mat A_evec, Vec A_eval, Mat B_evec, Vec B_eval, Mat C_evec, Vec C_eval, bool inverses)
{
    static Mat C;
    int d=A_evec.length();
    C.resize(d,d);
    C.clear();
    if (inverses)
        for (int i=0;i<d;i++)
        {
            real ai = A_eval[i], bi = B_eval[i];
            externalProductScaleAcc(C,A_evec(i),A_evec(i),ai!=0 ?1/ai:0);
            externalProductScaleAcc(C,B_evec(i),B_evec(i),bi!=0 ?1/bi:0);
        }
    else
        for (int i=0;i<d;i++)
        {
            externalProductScaleAcc(C,A_evec(i),A_evec(i),A_eval[i]);
            externalProductScaleAcc(C,B_evec(i),B_evec(i),B_eval[i]);
        }
    eigenVecOfSymmMat(C,d,C_eval,C_evec,false);
    if (inverses)
        for (int i=0;i<d;i++)
        {
            real ci = C_eval[i];
            if (ci!=0) C_eval[i] = 1/ci;
        }
}
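
// Usage sketch (illustrative; the names are local to this sketch): with
// inverses=true this combines two precision structures, e.g. the covariance
// of a product of two Gaussian densities, which is inv(inv(A)+inv(B)).
/*
    int d = A_evec.width();
    Mat C_evec(d,d);
    Vec C_eval(d);
    addEigenMatrices(A_evec, A_eval, B_evec, B_eval, C_evec, C_eval, true);
*/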

// Given the weighted sums sum_w = sum_t w_t, sum_wx = sum_t w_t x_t and
// sum_wx2 = sum_t w_t x_t x_t', compute the weighted mean mu and the
// eigendecomposition of the weighted covariance, flooring the eigenvalues
// at min_variance. N.B. sum_wx2 is overwritten with the covariance matrix.
void sums2Gaussian(real sum_w, Vec sum_wx, Mat sum_wx2, Vec mu, Mat cov_evectors, Vec cov_evalues, real min_variance)
{
    if (sum_w>0)
    {
        real normf=1.0/sum_w;
        // mu = sum_wx / sum_w
        multiply(sum_wx,normf,mu);
        // cov = sum_wx2 / sum_w  -  mu mu'
        multiply(sum_wx2,sum_wx2,normf);
        externalProductScaleAcc(sum_wx2,mu,mu,-1.0);
        // perform eigendecomposition of the covariance matrix
        eigenVecOfSymmMat(sum_wx2,mu.length(),cov_evalues,cov_evectors,false);
    }
    else
    {
        cov_evalues.fill(1.0);
        identityMatrix(cov_evectors);
    }
    for (int i=0;i<cov_evalues.length();i++)
        if (cov_evalues[i]<min_variance)
            cov_evalues[i]=min_variance;
}
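
// Usage sketch (illustrative; 'data' is a hypothetical Mat whose rows are the
// samples): accumulate weighted sums over the data, then convert them into a
// Gaussian with a variance floor.
/*
    int d = 3;
    real sum_w = 0;
    Vec sum_wx(d);
    Mat sum_wx2(d,d);
    for (int t=0; t<data.length(); t++)
    {
        Vec v = data(t);
        real w = 1.0;                                // uniform weights here
        sum_w += w;
        multiplyAcc(sum_wx, v, w);                   // sum_wx  += w * v
        externalProductScaleAcc(sum_wx2, v, v, w);   // sum_wx2 += w * v v'
    }
    Vec mu(d), cov_evalues;
    Mat cov_evectors;
    sums2Gaussian(sum_w, sum_wx, sum_wx2, mu, cov_evectors, cov_evalues, 1e-6);
*/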

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :