// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio and University of
//                         Montreal, all rights reserved
// Copyright (C) 2006 Olivier Delalleau
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

//
// Cholesky_utils.cc
//
//
// Authors: Yoshua Bengio
//

#include <plearn/math/Cholesky_utils.h>
#include <plearn/math/TMat_maths.h>


namespace PLearn {
using namespace std;

/////////////////////////////
// choleskyAppendDimension //
/////////////////////////////
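// Appends one dimension to the Cholesky factorization: given the lower-triangular
// factor L of an n x n matrix A (A = L L') and 'new_row' holding the corresponding
// row of the extended (n+1) x (n+1) matrix (new diagonal element last), L is
// resized to (n+1) x (n+1) so that it becomes the factor of the extended matrix.
// The first n elements of the new row of L are obtained by solving
// L l = new_row.subVec(0, n), and the corner element is sqrt(new_row.last() - l'l).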
void choleskyAppendDimension(Mat& L, const Vec& new_row)
{
    static Vec last_row;
    int n = L.length();
    PLASSERT( L.width() == n );
    PLASSERT( new_row.length() == n + 1 );
    PLASSERT( new_row.last() >= 0 );

    if (n == 0) {
        // Simpler version for this specific case.
        L.resize(1, 1);
        L(0, 0) = sqrt(new_row[0]);
        PLASSERT( !L.hasMissing() );
        return;
    }

    last_row.resize(n);
    choleskyLeftSolve(L, new_row.subVec(0, n), last_row);
    last_row.append(sqrt(new_row.last() - pownorm(last_row)));
    PLASSERT( !last_row.hasMissing() );
    L.resize(n + 1, n + 1, 0, true);
    L(n) << last_row;
}
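
// Illustrative usage sketch (not from the library; 'n_features', 'lambda' and
// 'x_cols' are hypothetical names, with x_cols(i) returning the vector of
// outputs of feature i on all examples): growing the factor of
// A = lambda I + X' X one dimension at a time.
//
//     Mat L(0, 0);
//     for (int i = 0; i < n_features; i++) {
//         Vec a_new(i + 1);
//         for (int j = 0; j < i; j++)
//             a_new[j] = dot(x_cols(i), x_cols(j));
//         a_new[i] = lambda + pownorm(x_cols(i));
//         choleskyAppendDimension(L, a_new);
//     }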

/////////////////////////////
// choleskyRemoveDimension //
/////////////////////////////
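// Removes dimension i from the Cholesky factorization: L (p x p, lower-triangular,
// with A = L L') is updated and shrunk to (p-1) x (p-1) so that it becomes the
// factor of A with its i-th row and column removed.  The corresponding column is
// moved to the last position through successive adjacent exchanges (Givens
// rotations) and then dropped.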
void choleskyRemoveDimension(Mat& L, int i)
{
    int p = L.length();
    /* Old version, will be removed when the fast version below is tested

    // Note that in order to use the exact algorithms from the matrix
    // algorithms book, we need to transpose L (since R = L' in the QR
    // decomposition). There may be a more efficient way to do the same
    // operations.
    L.transpose();
    for (int j = i; j < p - 1; j++)
        chol_dxch(L, j, j + 1);
    L = L.subMat(0, 0, p - 1, p - 1);
    L.transpose();
    */
    for (int j = i; j < p - 1; j++)
        chol_dxch_tr(L, j, j + 1);
    L = L.subMat(0, 0, p - 1, p - 1);
}

// L (n_active x n_active) is lower-triangular and such that
// A = L L' = lambda I + sum_t phi_t(x_t) phi_t(x_t)',
// where the sum runs up to 'now' and phi_t is the 'current' active basis set.
// This function updates L so as to incorporate a new basis, i.e. n_active is
// incremented, using a new basis whose outputs on all the examples up to 'now'
// are given in new_basis_outputs.
// The dimensions of L are changed as a result. Note that L must have been
// pre-allocated with dimensions max_n_active x max_n_active. The matrix
// active_bases_outputs (n_active x n_examples) contains the outputs of the
// active bases that are in phi up to now.
// Computational cost is O(n_active * n_examples).
void choleskyInsertBasis(Mat& L, Mat active_bases_outputs, Vec new_basis_outputs, real lambda, real min_Lii)
{
    // Let us write f = new_basis_outputs.
    // Let L* be the new L (the update is done in-place, though).
    // It is easy to show that L*(1:n_active,1:n_active) = L, so we only need to
    // find the new last row of L*. Let l be the vector with its first (current)
    // n_active elements, and l* = L*(n_active+1,n_active+1) its last (corner) element.
    //
    // The first n_active elements of the last row of L* L*' should be
    // y_i = sum_t phi_{ti} f_t = f . phi_{:,i}.
    // The last element of that row (lower right corner) of L* L*' should be lambda + f . f.
    // But the first n_active elements of the last row of L* L*' are (L l)'.
    // Hence we must choose l s.t. L l = y, which is obtained by a simple forward
    // substitution (L is lower-triangular).
    // In the corner element of L* L*' we will get l*^2 + l'l, but we want f'f + lambda.
    // Hence we set the corner element of L*: l* = sqrt(f'f + lambda - l'l).
    //

    int n_active = L.length();
    static Vec y, l;
    if (n_active == 0) { y.resize(1); l.resize(1); } // to avoid error when asking for data()
    y.resize(n_active);
    real* yp = y.data();
    l.resize(n_active);
    real* lp = l.data();

    // O(n_bases * n_examples)
    for (int i = 0; i < n_active; i++)
        yp[i] = dot(new_basis_outputs, active_bases_outputs(i));
    // O(n_bases^2)
    choleskyLeftSolve(L, y, l);

    L.resize(n_active + 1, n_active + 1, n_active * n_active, true);
    real* Lplast = L[n_active];
    real ll = 0;
    // O(n_bases)
    for (int i = 0; i < n_active; i++)
    {
        Lplast[i] = lp[i];
        ll += lp[i] * lp[i];
    }
    real arg = lambda + pownorm(new_basis_outputs) - ll;
    if (arg > 0)
        Lplast[n_active] = sqrt(arg);
    else
        Lplast[n_active] = min_Lii;
}

// Given a current Cholesky decomposition of A = L L' = sum_t v_t v_t',
// add an extra v v' term to the matrix A, i.e. update the lower-triangular L accordingly.
// Computational cost: O(n^2).
void choleskyUpgrade(Mat& L, Vec v)
{
    // Algorithm: see the tech report "Low Rank Updates for the Cholesky
    // Decomposition" by Matthias Seeger, UC Berkeley, 2005.
    // Denote n = dim(L).
    // - find vector p s.t. L p = v (forward substitution)
    // - compute elements of vectors b and d as follows:
    //     u = 1
    //     for i = 1 to n-1
    //       a = u + p_i^2
    //       d_i = a/u
    //       b_i = p_i/a
    //       u = a
    //     d_n = 1 + p_n^2 / u
    // - update the other elements of L as follows:
    //     for i = 1 to n
    //       s = 0
    //       new L_ii = L_ii * sqrt(d_i)
    //       if i > 1 r = L_ii p_i
    //       for j = i-1 down to 1
    //          s = s + r; r = L_ij p_j
    //          new L_ij = (L_ij + s b_j) * sqrt(d_j)
    //
    int n = L.length();
    static Vec p, b, d;
    p.resize(n);
    b.resize(n);
    d.resize(n);
    real* p_ = p.data();
    real* b_ = b.data();
    real* d_ = d.data();
    choleskyLeftSolve(L, v, p);
    real u = 1;
    for (int i = 0; i < n - 1; i++)
    {
        real pi = p_[i];
        real a = u + pi * pi;
        d_[i] = a / u;
        b_[i] = pi / a;
        u = a;
    }
    real lp = p_[n-1];
    d_[n-1] = 1 + lp * lp / u;
    for (int i = 0; i < n; i++)
        d_[i] = sqrt(d_[i]);
    real r;
    for (int i = 0; i < n; i++)
    {
        real s = 0;
        real* Li = L[i];
        real Lii = Li[i];
        Li[i] *= d_[i];
        if (i > 0) {
            r = Lii * p_[i];
            for (int j = i - 1; j >= 0; j--)
            {
                s = s + r;
                r = Li[j] * p_[j];
                Li[j] = (Li[j] + s * b_[j]) * d_[j];
            }
        }
    }
}
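
// Illustrative usage sketch (not from the library; 'T', 'n', 'lambda' and the
// matrix 'v' of observation vectors are hypothetical names): maintaining the
// factor of A = lambda I + sum_t v_t v_t' as observations stream in.
//
//     Mat A(n, n), L(n, n);
//     identityMatrix(A);
//     A *= lambda;
//     choleskyDecomposition(A, L);     // L L' = lambda I
//     for (int t = 0; t < T; t++)
//         choleskyUpgrade(L, v(t));    // now L L' = lambda I + sum_{s<=t} v_s v_s'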

///////////////
// chol_dxch //
///////////////
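// Exchanges columns l and m of the upper-triangular factor R and restores its
// triangular form with Givens rotations (the column-exchange update from the
// "matrix algorithms book" mentioned in choleskyRemoveDimension).  Only rows
// 0..max(l, m) of R are modified.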
void chol_dxch(Mat& R, int l, int m)
{
    if (l == m)
        return;
    if (l > m) {
        int tmp = l;
        l = m;
        m = tmp;
    }
    //static Vec tmp;
    int n = R.length();
    int p = n;
    //Mat first_m_rows = R.subMatRows(0, m + 1);
    R.subMatRows(0, m + 1).swapColumns(l, m);
    real c, s;
    for (int k = m - 1; k >= l + 1; k--) {
        chol_rotgen(R(k, l), R(k + 1, l), c, s);
        chol_rotapp(c, s, R(k).subVec(k, p - k), R(k + 1).subVec(k, p - k));
    }
    for (int k = l; k < m; k++) {
        chol_rotgen(R(k, k), R(k + 1, k), c, s);
        chol_rotapp(c, s, R(k).subVec(k + 1, p - k - 1),
                          R(k + 1).subVec(k + 1, p - k - 1));
    }
}

//////////////////
// chol_dxch_tr //
//////////////////
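// Transposed version of chol_dxch: performs on the lower-triangular factor L
// the update that chol_dxch would perform on R = L', without explicitly
// transposing the matrix.  This is the version used by choleskyRemoveDimension.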
void chol_dxch_tr(Mat& R, int l, int m)
{
    if (l == m)
        return;
    if (l > m) {
        int tmp = l;
        l = m;
        m = tmp;
    }
    int n = R.width();
    int p = n;
    swapRows(R.subMatColumns(0, m + 1), l, m);
    real c, s;
    for (int k = m - 1; k >= l + 1; k--) {
        chol_rotgen(R(l, k), R(l, k + 1), c, s);
        chol_rotapp_tr_opt(c, s, R, k, k, k + 1, p - k);
        /*
        chol_rotapp_tr(c, s, R.subMat(k, k, p - k, 1),
                             R.subMat(k, k + 1, p - k, 1));
        */
    }
    for (int k = l; k < m; k++) {
        chol_rotgen(R(k, k), R(k, k + 1), c, s);
        chol_rotapp_tr_opt(c, s, R, k + 1, k, k + 1, p - k - 1);
        /*
        chol_rotapp_tr(c, s, R.subMat(k + 1, k, p - k - 1, 1),
                             R.subMat(k + 1, k + 1, p - k - 1, 1));
        */
    }
}

/////////////////
// chol_rotapp //
/////////////////
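// Applies the Givens rotation with coefficients (c, s) to the pair of vectors
// (x, y), in place:  x <- c*x + s*y  and  y <- c*y - s*x,  where the right-hand
// sides use the original values of x and y.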
void chol_rotapp(real c, real s, const Vec& x, const Vec& y)
{
    static Vec t;
    PLASSERT( x.length() == y.length() );
    t.resize(x.length());
    t << x;
    multiplyScaledAdd(y, c, s, t);
    multiplyScaledAdd(x, c, -s, y);
    x << t;
}

////////////////////
// chol_rotapp_tr //
////////////////////
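// Same rotation as chol_rotapp, but x and y are passed as n x 1 column matrices
// (column segments of the factor), which is how chol_dxch_tr extracts them.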
void chol_rotapp_tr(real c, real s, const Mat& x, const Mat& y)
{
    static Mat t;
    PLASSERT( x.length() == y.length() );
    PLASSERT( x.width() == 1 );
    t.resize(x.length(), x.width());
    t << x;
    x *= c;
    multiplyAcc(x, y, s);
    y *= c;
    multiplyAcc(y, t, -s);
}

////////////////////////
// chol_rotapp_tr_opt //
////////////////////////
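// Optimized equivalent of chol_rotapp_tr(c, s, R.subMat(i, j, m, 1), R.subMat(i, k, m, 1)):
// the rotation is applied to the length-m column segments of R starting at row i,
// in columns j and k, using BLAS calls when USE_BLAS_SPECIALISATIONS is defined.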
void chol_rotapp_tr_opt(real c, real s, const Mat& R,
                        int i, int j, int k, int m)
{
#ifdef USE_BLAS_SPECIALISATIONS
    static Mat t;
    t.resize(m, 1);
    real* t_data = t.data();
    real* R_i = R[i];
    real* x_data = R_i + j;
    real* y_data = R_i + k;
    int one = 1;
    int mod = R.mod();
    BLAS_COPY(&m, x_data, &mod, t_data, &one);
    BLAS_SCALE(&m, &c, x_data, &mod);
    BLAS_MULT_ACC(&m, &s, y_data, &mod, x_data, &mod);
    BLAS_SCALE(&m, &c, y_data, &mod);
    real minus_s = -s;
    BLAS_MULT_ACC(&m, &minus_s, t_data, &one, y_data, &mod);
#else
    chol_rotapp_tr(c, s, R.subMat(i, j, m, 1), R.subMat(i, k, m, 1));
#endif
}

/////////////////
// chol_rotgen //
/////////////////
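// Generates the Givens rotation coefficients (c, s) that zero out b, and applies
// it to the pair (a, b): on return, a = sqrt(a^2 + b^2) and b = 0 (c = 1, s = 0
// when both inputs are zero).  The scaling by |a| + |b| guards against
// overflow/underflow in the intermediate squares.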
void chol_rotgen(real& a, real& b, real& c, real& s)
{
    real t = fabs(a) + fabs(b);
    if (fast_exact_is_equal(t, 0)) {
        c = 1;
        s = 0;
        return;
    }
    real a_over_t = a / t;
    real b_over_t = b / t;
    t *= sqrt(a_over_t * a_over_t + b_over_t * b_over_t);
    c = a / t;
    s = b / t;
    a = t;
    b = 0;
}

} // end namespace PLearn


#include <plearn/math/random.h>

namespace PLearn {

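// Small self-test of the routines above: it accumulates A = lambda I + sum_t x_t x_t'
// explicitly, compares a batch choleskyDecomposition against the accumulated matrix,
// and compares the factors maintained incrementally by choleskyUpgrade and
// choleskyInsertBasis against batch decompositions, printing average and maximum
// discrepancies.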
void testCholeskyRoutines()
{
    int n = 5, l = 10;
    real lambda = 0.1;
    Mat Xp(l, n + 1);
    Mat X = Xp.subMatColumns(0, n);
    Mat Mp(n + 1, n + 1);
    Mat M = Mp.subMat(0, 0, n, n);
    Mat L(n + 1, n + 1), testL(n, n), Lp(n + 1, n + 1), testLp(n + 1, n + 1);
    L.resize(n, n);
    fill_random_uniform(Xp, -1., 1.);

    identityMatrix(Mp);
    Mp *= lambda;
    choleskyDecomposition(M, testL);
    for (int t = 0; t < l; t++)
    {
        //externalProductAcc(M,X(t),X(t));
        externalProductAcc(Mp, Xp(t), Xp(t));
        choleskyUpgrade(testL, X(t));
    }
    choleskyDecomposition(M, L);
    Mat testM(n, n);
    product(testM, L, transpose(L));
    testM -= M;
    real average_error = sumsquare(testM) / (n * n);
    real max_error = max(testM);
    cout << "Cholesky decomposition average error = " << average_error << ", max error = " << max_error << endl;

    // *** test choleskyUpgrade ***
    // compare with the batch method:
    testL -= L;
    average_error = sumsquare(testL) / (n * (n + 1) / 2);
    max_error = max(testL);
    cout << "average error in choleskyUpgrade for " << l << " upgrades of a " << n << " x " << n << " matrix = " << average_error << ", max error = " << max_error << endl;

    // *** test choleskyInsertBasis ***
    Mat bases_outputs = transpose(X);
    choleskyInsertBasis(L, bases_outputs, Xp.column(n).toVecCopy(), lambda, 1e-10);
    // compare with the batch method:
    choleskyDecomposition(Mp, testLp);
    testLp -= L;
    average_error = sumsquare(testLp) / ((n + 1) * (n + 2) / 2);
    max_error = max(testLp);
    cout << "average error in choleskyInsertBasis = " << average_error << ", max error = " << max_error << endl;
}


} // end of namespace PLearn



/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :