// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio and University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: GenMat.h 8235 2007-11-07 21:32:01Z nouiz $
 * Generic Matrix (template) operations
 * AUTHORS: Yoshua Bengio
 * This file is part of the PLearn library.
 ******************************************************* */


#ifndef GenMat_INC
#define GenMat_INC

#include <plearn/math/TMat_maths.h>
#include <plearn/math/random.h>
#include <plearn/math/parpack.h>

namespace PLearn {
using namespace std;

/*  SquaredSymmMatT: wraps a square symmetric matrix A so that product()
 *  computes y = A*(A*x), i.e. the wrapper behaves like A^2 without ever
 *  forming A^2 explicitly.
 */
template<class MatT>
class SquaredSymmMatT 
{
protected:
    MatT& A_;
    Vec Ax; // scratch buffer holding the intermediate A*x
public:
    SquaredSymmMatT(MatT& A) : A_(A), Ax(A.length()) 
    { 
        if (A.length() != A.width()) 
            PLERROR("SquaredSymmMatT expects a square symmetric matrix"); 
    }
    int length() const { return A_.length(); }
    int width() const { return A_.width(); }
    // y = A*(A*x)
    void product(const Vec& x, Vec& y)
    {
        product(A_, x,Ax);
        product(A_, Ax,y);
    }

    void diag(Vec& d) { diagonalOfSquare(A_, d); }
    void diagonalOfSquare(Vec& d) 
    { PLERROR("SquaredSymmMatT::diagonalOfSquare not implemented"); }

};

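// Illustrative sketch (not from the original docs): assuming the generic
// product(M, x, y) used above computes y = M*x for a plain Mat, the wrapper
// applies A twice instead of materializing A*A:
//
//     Mat A(n,n);                     // a square symmetric matrix
//     SquaredSymmMatT<Mat> A2(A);
//     Vec x(n), y(n);
//     fill_random_uniform(x, -1, 1);
//     A2.product(x, y);               // y = A*(A*x) = (A^2)*x
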
/*  ReverseMatT: wraps a square symmetric matrix A so that product() computes
 *  y = (alpha*I - A)*x.  If alpha is an upper bound on A's largest
 *  eigenvalue, the top eigenvectors of alpha*I - A are the bottom
 *  eigenvectors of A.
 */
template<class MatT>
class ReverseMatT 
{
protected:
    MatT& A_;
    real alpha_;
public:
    ReverseMatT(MatT& A, real alpha) : A_(A), alpha_(alpha)
    { 
        if (A.length() != A.width()) 
            PLERROR("ReverseMatT expects a square symmetric matrix"); 
    }
    int length() const { return A_.length(); }
    int width() const { return A_.width(); }
    // y = (alpha*I - A)*x
    void product(const Vec& x, Vec& y)
    {
        product(A_, x,y);
        y*=-1;
        multiplyAcc(y, x,alpha_);
    }

    void diag(Vec& d) 
    { 
        diag(A_, d); 
        d*=-1;      // was "diag*=-1": 'diag' names the function, the result vector is d
        d+=alpha_;
    }
    // d = diagonal of (alpha*I - A)^2 = alpha^2 - 2*alpha*diag(A) + diag(A^2)
    void diagonalOfSquare(Vec& d) 
    {
        Vec diag_A(A_.length());
        diag(A_, diag_A);
        diagonalOfSquare(A_, d);
        multiplyAcc(d, diag_A,-2*alpha_);
        d+=alpha_*alpha_;
    }

};

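// Illustrative sketch (not from the original docs): power iteration on
// ReverseMatT converges to A's *smallest* eigenpair, because the largest
// eigenvalue of alpha*I - A is alpha - lambda_min(A):
//
//     real alpha = 10.0;              // assumed upper bound on lambda_max(A)
//     ReverseMatT<Mat> R(A, alpha);
//     Vec x(n), y(n);
//     fill_random_uniform(x, -1, 1);
//     R.product(x, y);                // y = alpha*x - A*x
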
/*  MatTPlusSumSquaredVec: wraps a square symmetric matrix A plus a sum of
 *  rank-one terms, behaving like B = A + sum_t x_t x_t' where the x_t are
 *  the rows of X_.  Used below for deflation: once an eigenvector is found,
 *  adding its (scaled) outer product pushes it out of the bottom of the
 *  spectrum.
 */
template<class MatT>
class MatTPlusSumSquaredVec
{
public:
    MatT& A_;
    Mat X_; 
public:
    MatTPlusSumSquaredVec(MatT& A, int alloc_n_vectors) : A_(A), X_(alloc_n_vectors,A.length())
    { X_.resize(0,A.length()); }
    MatTPlusSumSquaredVec(MatT& A, Mat& X) : A_(A), X_(X) 
    { 
        if (X.width()!=A.length() || A.length()!=A.width()) 
            PLERROR("MatTPlusSumSquaredVec(MatT,Mat) arguments don't match"); 
    }
    // add the rank-one term x x' to the wrapped matrix
    void squaredVecAcc(Vec& x)
    {
        X_.resize(X_.length()+1,X_.width());
        X_(X_.length()-1) << x;
    }
    int length() const { return A_.length(); }
    int width() const { return A_.width(); }
    // y = (A + sum_t x_t x_t') * x
    void product(const Vec& x, Vec& y)
    {
        product(A_, x,y);
        for (int t=0;t<X_.length();t++)
        {
            Vec x_t = X_(t);
            multiplyAcc(y, x_t,dot(x_t,x));
        }
    }

    void diag(Vec& d) 
    { 
        diag(A_, d); 
        for (int t=0;t<X_.length();t++)
            squareAcc(d, X_(t));
    }
    void diagonalOfSquare(Vec& d) 
    { PLERROR("MatTPlusSumSquaredVec::diagonalOfSquare not implemented"); }
};
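
// Illustrative sketch (not from the original docs): deflation as done in
// SymmMatNullSpaceByInversePowerIteration below.  Once a vector y with small
// |Ay| has been found, add a scaled copy c*y so that B = A + (c*y)(c*y)'
// no longer has y among its smallest eigenvectors:
//
//     MatTPlusSumSquaredVec<Mat> B(A, 10); // room for up to 10 rank-one terms
//     Vec cy(n);
//     multiply(y, c, cy);                  // cy = c*y
//     B.squaredVecAcc(cy);                 // B now behaves like A + (c*y)(c*y)'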


#if 0
// for debugging
#define MatT Mat
inline bool SolveLinearSymmSystemByCG(MatT A, Vec x, const Vec& b, int& max_iter, real& tol, real lambda)
{
    real resid;
    int n=A.length();
    Vec p(n), z(n), q(n), invdiag(n);
    real alpha, beta, rho, previous_rho;
    real normb = norm(b);

    // inverse diagonal of A, for preconditioning
    diag(A, invdiag);
    if (lambda>0)
        invdiag+=lambda;
    invertElements(invdiag);

    Vec r(n);
    // r = b - (A+lambda*I)*x;
    product(A, x,r);
    if (lambda>0)
        multiplyAcc(r, x,lambda);
    r*=-1;
    r+=b;

    if (normb == 0.0) 
        normb = 1;
  
    resid = norm(r);
    //cout << "at 0, |r| = " << resid << endl;
    if ((resid / normb) <= tol) {
        tol = resid;
        max_iter = 0;
        return true;
    }

    for (int i = 1; i <= max_iter; i++) {
        // z = M.solve(r); i.e. solve M z = r, i.e. diag(A+lambda I) z = r
        // i.e. z_i = r_i / (A_{i,i} + lambda)
        multiply(r,invdiag,z);

        rho = dot(r, z);
    
        if (i == 1)
            p << z;
        else {
            beta = rho / previous_rho;
            // p = z + beta * p;
            multiplyAdd(z,p,beta,p);
        }
    
        // q = (A+lambda I)*p;
        product(A, p,q);
        multiplyAcc(q, p,lambda);

        alpha = rho / dot(p, q);

        // x += alpha * p;
        multiplyAcc(x, p,alpha);
        // r -= alpha * q;
        multiplyAcc(r, q,-alpha);
        resid = norm(r);
        // cout << "at " << i << ", |r| = " << resid << endl;
        if ((resid / normb) <= tol) {
            tol = resid;
            max_iter = i;
            return true;     
        }

        previous_rho = rho;
    }
  
    tol = resid;
    return false;
}
#undef MatT
#else

// Solve (A + lambda*I) x = b for symmetric A by diagonally preconditioned
// conjugate gradients.  On entry x holds the initial guess; max_iter and tol
// are in/out (iterations actually used, final residual norm).  Returns true
// iff the tolerance was reached.
template <class MatT>
bool SolveLinearSymmSystemByCG(MatT A, Vec x, const Vec& b, int& max_iter, real& tol, real lambda)
{
    real resid;
    int n=A.length();
    Vec p(n), z(n), q(n), invdiag(n);
    real alpha, beta, rho, previous_rho;
    real normb = norm(b);

    // inverse diagonal of A+lambda*I, for preconditioning
    diag(A, invdiag);
    if (lambda>0)
        invdiag+=lambda;
    invertElements(invdiag);

    Vec r(n);
    // r = b - (A+lambda*I)*x
    product(A, x,r);
    if (lambda>0)
        multiplyAcc(r, x,lambda);
    r*=-1;
    r+=b;

    if (normb == 0.0) 
        normb = 1;
  
    resid = norm(r);
    //cout << "at 0, |r| = " << resid << endl;
    if ((resid / normb) <= tol) {
        tol = resid;
        max_iter = 0;
        return true;
    }

    for (int i = 1; i <= max_iter; i++) {
        // solve the preconditioning system M z = r with M = diag(A + lambda*I),
        // i.e. z_i = r_i / (A_{i,i} + lambda)
        multiply(r,invdiag,z);

        rho = dot(r, z);
    
        if (i == 1)
            p << z;
        else {
            beta = rho / previous_rho;
            // p = z + beta * p
            multiplyAdd(z,p,beta,p);
        }
    
        // q = (A + lambda*I)*p
        product(A, p,q);
        multiplyAcc(q, p,lambda);

        alpha = rho / dot(p, q);

        // x += alpha * p
        multiplyAcc(x, p,alpha);
        // r -= alpha * q
        multiplyAcc(r, q,-alpha);
        resid = norm(r);
        if ((resid / normb) <= tol) {
            tol = resid;
            max_iter = i;
            return true;     
        }

        previous_rho = rho;
    }
  
    tol = resid;
    return false;
}
#endif
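
// Illustrative sketch (not from the original docs): solve (A + lambda*I)x = b
// for a symmetric Mat A, with a zero initial guess:
//
//     Vec b(n), x(n);
//     x.fill(0);                        // initial guess
//     int max_iter = 100;               // in/out: iterations allowed/used
//     real tol = 1e-6;                  // in/out: tolerance/final residual
//     bool ok = SolveLinearSymmSystemByCG(A, x, b, max_iter, tol, 1e-3);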

// Power iteration: repeatedly apply A to an initial vector x0 to estimate the
// principal eigenpair of A.  The last N=final_vectors.length() normalized
// iterates are kept in the rows of final_vectors, and final_offset indexes
// the most recent one.
#if 0

#define MatT Mat
inline real PowerIteration(MatT& A, Vec x0, int& n_iterations, 
                           real RayleighQuotientTolerance, Mat final_vectors, 
                           int& final_offset)
{
    int N=final_vectors.length();
    if (N<3) PLERROR("PowerIteration: final_vectors.length_ = %d should be >= 3",N);
    Vec previous=final_vectors(0);
    Vec current=final_vectors(1);
    Vec next=final_vectors(2);
    previous << x0; 
    product(A, previous,current);
    real current_norm=norm(current), next_norm, current_Rq, previous_Rq=0;
    current/=current_norm;
    int it=1;
    for (;it<=n_iterations;it++)
    {
        product(A, current,next);
        // check Rayleigh quotient (note that norm(current)=1)
        current_Rq = dot(current,next);
        // normalize
        next_norm = norm(next);
        next/=next_norm;
        cout << "at iteration " << it << ", R(A,x) = " << current_Rq << ", |Ax|/|x| = " 
             << next_norm << endl;
        if (current_Rq < previous_Rq)
            PLWARNING("PowerIteration: something strange, x'Ax/x'x is decreasing %g->%g",
                      previous_Rq, current_Rq);
        if (it>=N && current_Rq / previous_Rq - 1 < RayleighQuotientTolerance)
        {
            // stop
            n_iterations = it;
            final_offset = it%N;
            return current_Rq;
        }
        // iterate
        previous_Rq = current_Rq;
        current_norm = next_norm;
        previous = current;
        current = next;
        next = final_vectors((it+2)%N);
    }
    // iteration budget exhausted (previously fell off the end: undefined behavior)
    final_offset = it%N;
    return current_Rq;
}
#undef MatT
#else

template<class MatT>
real PowerIteration(MatT& A, Vec x0, int& n_iterations, 
                    real RayleighQuotientTolerance, Mat final_vectors, 
                    int& final_offset)
{
    int N=final_vectors.length();
    if (N<3) PLERROR("PowerIteration: final_vectors.length_ = %d should be >= 3",N);
    Vec previous=final_vectors(0);
    Vec current=final_vectors(1);
    Vec next=final_vectors(2);
    previous << x0; 
    product(A, previous,current);
    real current_norm=norm(current), next_norm, current_Rq, previous_Rq=0;
    current/=current_norm;
    int it=1;
    for (;it<=n_iterations;it++)
    {
        product(A, current,next);
        // check Rayleigh quotient (note that norm(current)=1)
        current_Rq = dot(current,next);
        // normalize
        next_norm = norm(next);
        next/=next_norm;
        //cout << "at iteration " << it << ", R(A,x) = " << current_Rq << ", |Ax|/|x| = " 
        //     << next_norm << endl;
        if (current_Rq < previous_Rq)
            PLWARNING("PowerIteration: something strange, x'Ax/x'x is decreasing %g->%g",
                      previous_Rq, current_Rq);
        if (it>=N && current_Rq / previous_Rq - 1 < RayleighQuotientTolerance)
        {
            // stop
            n_iterations = it;
            final_offset = it%N;
            return next_norm;
        }
        // iterate
        previous_Rq = current_Rq;
        current_norm = next_norm;
        previous = current;
        current = next;
        next = final_vectors((it+2)%N);
    }
    // iteration budget exhausted (previously fell off the end: undefined behavior)
    final_offset = it%N;
    return next_norm;
}
#endif

// Orthogonalize the rows of A in place (Gram-Schmidt), dropping rows whose
// residual norm falls below 'tolerance' (i.e. nearly linearly dependent
// rows); returns the number of rows kept.
int GramSchmidtOrthogonalization(Mat A, real tolerance=1e-6);
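
// Illustrative use (cf. SymmMatNullSpaceByInversePowerIteration below):
//
//     int n_kept = GramSchmidtOrthogonalization(solutions);
//     solutions = solutions.subMatRows(0, n_kept);   // keep independent rows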

// Power iteration with optional progress output.  Like the overload above,
// but flips each iterate's sign so that its largest entry is positive, and
// returns the final |Ax|/|x| (an estimate of the largest eigenvalue of A).
template <class MatT>
real PowerIteration(MatT A, Vec x0, int& n_iterations, 
                    real RayleighQuotientTolerance, Mat final_vectors, 
                    int& final_offset, bool verbose=false)
{
    int N=final_vectors.length();
    if (N<3) PLERROR("PowerIteration: final_vectors.length_ = %d should be >= 3",N);
    Vec previous=final_vectors(0);
    Vec current=final_vectors(1);
    Vec next=final_vectors(2);
    previous << x0; 
    real max_x = max(previous);
    if (max_x<0) previous*=-1;
    product(A, previous,current);
    real current_norm=norm(current), next_norm=0, current_Rq, previous_Rq=0;
    max_x = max(current);
    if (max_x<0) current*=-1;
    current/=current_norm;
    int it=1;
    for (;it<=n_iterations;it++)
    {
        product(A, current,next);
        // check Rayleigh quotient (note that norm(current)=1)
        current_Rq = dot(current,next);
        // normalize
        next_norm = norm(next);
        next/=next_norm;
        max_x = max(next);
        if (max_x<0) next*=-1;
        if (verbose)
        {
            cout << "at iteration " << it << ", R(A,x) = " << current_Rq << ", |Ax|/|x| = " 
                 << next_norm << endl;
        }
        if (current_Rq < previous_Rq)
            PLWARNING("PowerIteration: something strange, x'Ax/x'x is decreasing %g->%g",
                      previous_Rq, current_Rq);
        if (it>=N && current_Rq / previous_Rq - 1 < RayleighQuotientTolerance)
        {
            // stop
            n_iterations = it;
            final_offset = it%N;
            if (verbose)
                cout << "power iteration finishes with |Ax|/|x| = " << next_norm << endl;
            return next_norm;
        }
        // iterate
        previous_Rq = current_Rq;
        current_norm = next_norm;
        previous = current;
        current = next;
        next = final_vectors((it+2)%N);
    }
    final_offset = it%N;
    if (verbose)
        cout << "power iteration finishes FAILING with |Ax|/|x| = " << next_norm << endl;
    return next_norm;
}
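
// Illustrative sketch (not from the original docs): estimate the largest
// eigenvalue of a symmetric Mat A; on return, final_offset indexes the row
// of 'iterates' holding the latest normalized eigenvector estimate:
//
//     Vec x0(n);
//     fill_random_uniform(x0, -1, 1);
//     Mat iterates(3, n);                // needs length >= 3
//     int n_iter = 50, offset;
//     real lmax = PowerIteration(A, x0, n_iter, 1e-3, iterates, offset, true);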

// Inverse power iteration: repeatedly solve (A + regularizer*I) x_new = x
// (approximately, by conjugate gradients) and normalize, converging towards
// the smallest eigenpair of A.  Keeps the last N=final_vectors.length()
// iterates and returns the final |Ax|/|x|.
template<class MatT>
real InversePowerIteration(MatT A, Vec x0, int& n_iterations, 
                           int max_n_cg_iter,
                           real RTolerance, Mat final_vectors, 
                           int& final_offset, real regularizer, bool verbose=false)
{
    int N=final_vectors.length();
    if (N<2) PLERROR("InversePowerIteration: final_vectors.length_ = %d should be >= 2",N);
    Vec previous=final_vectors(0);
    Vec current=final_vectors(1);
    previous << x0; 
    real max_x = max(previous);
    if (max_x<0) previous*=-1;
    real current_Rq, previous_Rq=0;
    max_x = max(current);
    if (max_x<0) current*=-1;
    normalize(previous);
    int it=1;
    Vec Ax = x0; // N.B. Vec copies share storage: x0 is reused as scratch for A*x
    real current_error =0;
    for (;it<=n_iterations;it++)
    {
        int CGniter = max_n_cg_iter;
        real residue = RTolerance;
        current << previous;
        bool success=SolveLinearSymmSystemByCG(A, current, previous, CGniter, residue, regularizer);
        if (verbose)
        {
            if (success)
                cout << "done CG in " << CGniter << " iterations with residue = " << residue << endl;
            else
                cout << "done incomplete CG in " << CGniter << " iterations with residue = " << residue << endl;
        }
        max_x = max(current);
        if (max_x<0) current*=-1;
        normalize(current);
        // check the Rayleigh quotient and the residual norm |Ax|
        product(A, current,Ax);
        current_Rq = dot(current,Ax);
        current_error = norm(Ax);
        if (verbose)
            cout << "at iteration " << it << ", R(A,x) = " << current_Rq << ", |Ax|/|x| = " 
                 << current_error << endl;
        if (verbose && current_Rq > (1+RTolerance)*previous_Rq)
            PLWARNING("InversePowerIteration: something strange, x'Ax/x'x is increasing %g->%g",
                      previous_Rq, current_Rq);
        if (it>=N && 1 - current_Rq / previous_Rq  < RTolerance)
        {
            // stop
            n_iterations = it;
            final_offset = it%N;
            if (verbose)
                cout << "inverse power iteration finishes with |Ax|/|x| = " << current_error << endl;
            return current_error;
        }
        // iterate
        previous_Rq = current_Rq;
        previous = current;
        current = final_vectors((it+1)%N);
    }
    final_offset = it%N;
    if (verbose)
        cout << "inverse power iteration finishes FAILING with |Ax|/|x| = " << current_error << endl;
    return current_error;
}
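
// Illustrative sketch (not from the original docs): estimate a smallest
// eigenpair of a symmetric Mat A, applying (A + 1e-3*I)^-1 approximately
// with at most 20 CG iterations per step:
//
//     Vec x0(n);
//     fill_random_uniform(x0, -1, 1);
//     Mat iterates(4, n);                 // needs length >= 2
//     int n_iter = 30, offset;
//     real resid = InversePowerIteration(A, x0, n_iter, 20, 1e-4,
//                                        iterates, offset, 1e-3, true);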

// Find a smallest eigenpair of symmetric A.  x holds the initial guess and,
// on return, the eigenvector estimate; the returned value is the residual
// |Ax| (near 0 when x lies in the null space of A).  Alternates inverse
// power iterations with diagonalization in the subspace of recent iterates.
template<class MatT>
real findSmallestEigenPairOfSymmMat(MatT& A, Vec x, 
                                    real error_tolerance=1e-3,
                                    real improvement_tolerance=1e-4, 
                                    int max_n_cg_iter=0, 
                                    int max_n_power_iter=0, bool verbose=false)
{
    int n=A.length();
    int n_try=5;

    // default iteration budgets grow slowly with n
    if (max_n_cg_iter==0)
        max_n_cg_iter = 5+int(pow(double(n),0.3));
    if (max_n_power_iter==0)
        max_n_power_iter = 5+int(pl_log(n));

    Mat try_solutions(n_try,n);
    Mat kernel_solutions = x.toMat(1,n);
    Vec Ax(n);

    int max_iter = int(sqrt(max_n_power_iter));
    real err=1e30, prev_err=1e30;
    int nrepeat=0;
    do {
        int n_iter=max_iter;
        int offs=0;
        real l0 = InversePowerIteration(A,x,n_iter,max_n_cg_iter,
                                        improvement_tolerance,
                                        try_solutions,offs,error_tolerance,verbose);
        if (verbose)
            cout << "got smallest eigenvalue = " << l0
                 << " in " << n_iter << " iterations" << endl;

        if (verbose)
        {
            // inspect each candidate solution kept by the inverse power iteration
            n_try = try_solutions.length();
            for (int i=0;i<n_try;i++)
            {
                cout << "for solution " << i << endl;
                Vec y = try_solutions(i);
                normalize(y);
                product(A, y,Ax);
                cout << "|A y| = " << norm(Ax) << endl;
                cout << "y. A y = " << dot(y,Ax) << endl;
            }
        }

        // refine x by diagonalizing A within the span of the candidate solutions
        Vec evalues(n_try);
        Mat evectors(n_try,n_try);
        diagonalizeSubspace(A, try_solutions, Ax, kernel_solutions, evalues, evectors);
        product(A, x,Ax);
        prev_err = err; // remember the previous error for the improvement test
                        // below (it used to be updated only in the verbose branch)
        err = norm(Ax);
        if (verbose)
            cout << "after diagonalization, err = " << err << endl;
        nrepeat++;
    } while (err > error_tolerance && n_try>=2 && 
             prev_err/err-1>improvement_tolerance && nrepeat<max_iter);
    if (verbose)
        cout << "return from findSmallestEigenPairOfSymmMat with err=" << err << endl;
    return err;
}
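
// Illustrative sketch (not from the original docs):
//
//     Vec x(n);
//     fill_random_uniform(x, 0.1, 0.5);   // initial guess, refined in place
//     real residual = findSmallestEigenPairOfSymmMat(A, x);
//     // residual ~ |A x| with |x|=1; near 0 if x spans a null direction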


// Find several (approximately) null-space directions of symmetric A.  On
// return, each row of 'solutions' is one direction, and normsAx[i], xAx[i]
// hold |A x_i| and x_i'A x_i respectively.  normsAx.length() sets how many
// directions are requested; the return value is how many linearly
// independent ones were actually found.
template<class MatT>
int SymmMatNullSpaceByInversePowerIteration(MatT &A, Mat solutions,
                                            Vec normsAx, Vec xAx,  
                                            real error_tolerance=1e-3,
                                            real improvement_tolerance=1e-4, 
                                            int max_n_cg_iter=0, 
                                            int max_n_power_iter=0, bool verbose=false)

{  
    int n=A.length();
    int n_soln=normsAx.length();
    solutions.resize(n_soln,n);
    if (max_n_cg_iter==0)
        max_n_cg_iter = 5+int(pow(double(n),0.3));
    if (max_n_power_iter==0)
        max_n_power_iter = 5+int(pl_log(n));
    Vec Ax(n);

    // B = A plus one rank-one deflation term per already-found direction
    MatTPlusSumSquaredVec<MatT> B(A,n_soln);
    Vec sy(n);
    fill_random_uniform(sy);
    Mat largest_evecs(3,n);
    int offs;
    int n_iter = MIN(max_n_power_iter,20);
    real largest_evalue = PowerIteration(A, sy, n_iter ,1e-3,
                                         largest_evecs, offs,verbose);
    if (verbose)
        cout << "largest evalue(B) = " << largest_evalue << endl;
    for (int i=0;i<n_soln;i++)
    {
        Vec y = solutions(i);
        if (i==0)
            y.fill(1);
        else
            fill_random_uniform(y,0.1,0.5);
        real residue=findSmallestEigenPairOfSymmMat(B, y, 
                                                    error_tolerance,
                                                    improvement_tolerance,
                                                    max_n_cg_iter,
                                                    max_n_power_iter,verbose);
        if (verbose)
        {
            cout << "found vector with |B y| = " << residue << endl;
            cout << "|y| = " << norm(y) << endl;
            product(A, y,Ax);
            cout << "****** |A y| = " << norm(Ax) << endl;
        }
        // deflate: B += (largest_evalue*y)(largest_evalue*y)', pushing the
        // direction just found out of the bottom of B's spectrum
        multiply(y,largest_evalue,sy);
        B.squaredVecAcc(sy);
    }

    int n_s=GramSchmidtOrthogonalization(solutions);
    solutions = solutions.subMatRows(0,n_s);
    if (n_s<n_soln && verbose)
        cout << "found only " << n_s << " independent solutions out of " 
             << n_soln << " requested" << endl;
    for (int i=0;i<n_s;i++)
    {
        Vec xi = solutions(i);
        // verify each solution
        product(A, xi,Ax);
        real err = dot(xi,Ax);
        real normAx = norm(Ax);
        normsAx[i]=normAx;
        xAx[i]=err;
        if (verbose)
            cout << "for " << i << "-th solution x: x'Ax = " << err << ", |Ax|/|x|= "
                 << normAx << endl;
    }
    return n_s;
}
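
// Illustrative sketch (not from the original docs): request 4 null-space
// directions of a symmetric Mat A:
//
//     int n_soln = 4;
//     Mat solutions(n_soln, n);
//     Vec normsAx(n_soln), xAx(n_soln);
//     int n_found = SymmMatNullSpaceByInversePowerIteration(A, solutions,
//                                                           normsAx, xAx);
//     // rows 0..n_found-1 of 'solutions' are the directions found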

// Gradient descent on the rows of X to minimize 0.5 * sum_d x_d' A x_d,
// driving the rows of X towards the span of A's smallest eigenvectors.
// Optionally re-orthonormalizes the rows every 'normalize_every' epochs, and
// optionally rotates X at the end so that its rows become approximate
// eigenvectors.  Returns the number of epochs performed.
template<class MatT>
int GDFindSmallEigenPairs(MatT& A,Mat X,
                          bool diagonalize_in_the_end=true,
                          real tolerance=1e-6,
                          int n_epochs=0,
                          real learning_rate=0,
                          int normalize_every=0,
                          real decrease_factor=0,
                          bool verbose=false)
{
    int n=A.length();
    int m=X.length();
    if (n_epochs==0)
        n_epochs = 1000+10*int(sqrt(n));
    Vec Ax(n);
    real err_tolerance, sum_norms, actual_err;
    if (learning_rate==0)
    {
        // pick the learning rate from an estimate of the largest eigenvalue
        fill_random_uniform(Ax,-1,1);
        Mat large_vectors(3,n);
        int offs;
        int n_iter = 10+int(sqrt(pl_log(double(n))));
        real max_eigen_value = 
            PowerIteration(A, Ax, n_iter,1e-3,large_vectors,offs,verbose);
        learning_rate = 2.0/max_eigen_value;
        if (verbose)
        {
            cout << "setting initial learning rate = 2/max_eigen_value = 2/" 
                 << max_eigen_value << " = " << learning_rate << endl;
        }
        err_tolerance = tolerance * max_eigen_value;
    }
    else err_tolerance = tolerance;
    real prev_err=1e30;
    int it=0;
    for (;it<n_epochs;it++)
    {
        // was "real learning_rate = learning_rate / ...", which shadowed the
        // parameter and read an uninitialized local
        real lrate = learning_rate / (1+it*decrease_factor);
        real err=0;
        for (int i=0;i<n;i++)
        {
            real* xi = &X(0,i);
            for (int d=0;d<m;d++, xi+=n)
            {
                // (A x_d)_i = gradient of 0.5*x_d'A x_d w.r.t. element i of row d
                real gradient = matRowDotVec(A, i,X(d));
                *xi -= lrate * gradient;
                err += gradient * *xi;
            }
        }
        if (verbose)
        {
            cout << "at iteration " << it << " of gradient descent, est. err.= " << err << endl;
            cout << "lrate = " << lrate << endl;
        }
        if (tolerance>0)
        {
            sum_norms = 0;
            for (int d=0;d<m;d++)
                sum_norms += norm(X(d));
            actual_err = err / (m*sum_norms);
            if (actual_err<err_tolerance) break;
        }
        if (err>prev_err)
            cout << "at iteration " << it << " of gradient descent, est. err.= " << err 
                 << " > previous error = " << prev_err << " ; lrate = " << lrate << endl;
        prev_err = err; // was never updated, making the check above vacuous
        if (verbose)
            for (int d=0;d<m;d++)
                cout << "norm(x[" << d << "])=" << norm(X(d)) << endl;
        if (normalize_every!=0 && it%normalize_every==0)
        {
            int new_m=GramSchmidtOrthogonalization(X,1e-9);
            // replace linearly dependent rows by fresh random vectors
            for (int e=new_m;e<m;e++)
                fill_random_uniform(X(e)); 
            if (verbose)
            {
                // report the actual cost 0.5 * sum_d x_d' A x_d
                real C=0;
                for (int d=0;d<m;d++)
                {
                    Vec xd = X(d);
                    product(A, xd,Ax);
                    C += dot(xd,Ax);
                    cout << "for " << d << ", |Ax|/|x| = " << norm(Ax) << endl;
                }
                cout << "actual cost = " << 0.5*C << endl;
            }
        }
    }
    if (diagonalize_in_the_end)
    {
        Mat diagonalized_solutions(m,n);
        Mat subspace_evectors(m,m);
        Vec subspace_evalues(m);
        diagonalizeSubspace(A,X,Ax,diagonalized_solutions,subspace_evalues,subspace_evectors);
        X << diagonalized_solutions;
    }
    return it;
}
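
// Illustrative sketch (not from the original docs): look for m small
// eigenvectors of a symmetric Mat A, re-orthonormalizing every 10 epochs
// and rotating the rows into approximate eigenvectors at the end:
//
//     Mat X(m, n);
//     for (int d=0; d<m; d++)
//         fill_random_uniform(X(d), -1, 1);
//     int epochs_used = GDFindSmallEigenPairs(A, X,
//                                             true,   // diagonalize at the end
//                                             1e-6,   // tolerance
//                                             0, 0,   // auto n_epochs, auto learning rate
//                                             10);    // normalize_every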

// Kernel PCA: given an n x n matrix of (centered) dot products, fill the
// n x m 'embedding' matrix with the m-dimensional coordinates of each point.
// Returns 0 or 1 on success, the eigenSparseSymmMat error code otherwise.
// Optionally returns views of the eigenvalues/eigenvectors via eval/evec.
template<class MatT>
int kernelPCAfromDotProducts(MatT& dot_products,Mat embedding, int max_n_eigen_iter=300, real ncv2nev_ratio=1.5, Vec* eval=0, Mat* evec=0)
{
    int n=dot_products.length();
    FORTRAN_Integer m=embedding.width();
    if (embedding.length()!=n)
        PLERROR("kernelPCAfromDotProducts: expected embedding.length()==dot_products.length(), got %d!=%d",
                embedding.length(),n);
    if (dot_products.width()!=n)
        PLERROR("kernelPCAfromDotProducts: expected dot_products a square matrix, got %d x %d",
                n,dot_products.width());

    static Vec e_values;
    e_values.resize(m);
    static Mat e_vectors;
    e_vectors.resize(m,n);
    
    if (evec) *evec = e_vectors;
    if (eval) *eval = e_values;

    int err=eigenSparseSymmMat(dot_products, e_values, 
                               e_vectors, m, max_n_eigen_iter,
                               true, true, false, false, ncv2nev_ratio);
    // change the order so that the largest e-value comes first
    e_values.swap();
    e_vectors.swapUpsideDown();

    if (!(err==0 || err==1))
        return err;
    // embedding coordinate j of each point = sqrt(eval_j) * eigenvector j
    static Vec feature;
    feature.resize(n);
    for (int j=0;j<m;j++)
    {
        real eval_j = e_values[j];
        if (eval_j<0)
        {
            PLWARNING("kernelPCAfromDotProducts: the matrix of dot-products is not positive-definite!, evalue=%g",eval_j);
            eval_j = -eval_j*0.2; // HEURISTIC TRICK: keep negative e-values, but smaller
        }
        real scale = sqrt(eval_j);
        feature << e_vectors(j);
        feature *= scale;
        embedding.column(j) << feature;
    }
    return 0;
}
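
// Illustrative sketch (not from the original docs): given a centered Gram
// ("dot products") matrix K of n points, compute an m-dimensional embedding,
// one point per row of 'embedding':
//
//     Mat K(n, n);                  // K(i,j) = <phi(x_i), phi(x_j)>, centered
//     Mat embedding(n, m);
//     int err = kernelPCAfromDotProducts(K, embedding);
//     // err == 0 or 1 on success; otherwise an eigenSparseSymmMat error code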

// Classical metric multidimensional scaling: given an n x n matrix of squared
// pairwise distances, fill the n x m 'embedding' matrix with coordinates that
// approximately reproduce those distances.  NOTE: square_distances is
// double-centered in place, so its contents are destroyed.
template<class MatT>
int metricMultiDimensionalScaling(MatT& square_distances,Mat embedding, int max_n_eigen_iter=300)
{
    int n=square_distances.length();
    FORTRAN_Integer m=embedding.width();
    if (embedding.length()!=n)
        PLERROR("MetricMultiDimensionalScaling: expected embedding.length()==square_distances.length(), got %d!=%d",
                embedding.length(),n);
    if (square_distances.width()!=n)
        PLERROR("MetricMultiDimensionalScaling: expected square_distances a square matrix, got %d x %d",
                n,square_distances.width());
    if (square_distances.size()!=n*n)
        PLERROR("MetricMultiDimensionalScaling: only works on a full, non-sparse matrix\n");

    // double-center the squared distances (converting them to dot products)
    static Vec avg_across_rows;
    avg_across_rows.resize(n);
    columnSum(square_distances, avg_across_rows);
    avg_across_rows *= 1.0/n;
    doubleCentering(square_distances,avg_across_rows,square_distances,-0.5);

    // find the m principal eigenpairs of the centered matrix
    static Vec e_values;
    e_values.resize(m);
    static Mat e_vectors;
    e_vectors.resize(m,n);
    int err=eigenSparseSymmMat(square_distances, e_values, 
                               e_vectors, m, max_n_eigen_iter);
    if (!(err==0 || err==1))
        return err;
    // embedding coordinate j of each point = sqrt(eval_j) * eigenvector j
    for (int j=0;j<m;j++)
    {
        real eval_j = e_values[j];
        if (eval_j<0)
            PLERROR("metricMultiDimensionalScaling: the matrix of dot-products is not positive-definite!, evalue=%g",eval_j);
        real scale = sqrt(eval_j);
        Vec feature_j = e_vectors(j);
        feature_j *= scale;
        embedding.column(j) << feature_j;
    }
    return 0;
}
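
// Illustrative sketch (not from the original docs): classical metric MDS on
// an n x n matrix of squared pairwise distances (destroyed in place):
//
//     Mat D2(n, n);                 // D2(i,j) = squared distance between i, j
//     Mat embedding(n, m);
//     int err = metricMultiDimensionalScaling(D2, embedding);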


} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :