// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: plapack.h 9436 2008-09-04 18:48:55Z nouiz $
 * This file is part of the PLearn library.
 ******************************************************* */

#ifndef plapack_h
#define plapack_h

#include "Mat.h"
#include "TMat_maths.h"

#include "lapack_proto.h"

namespace PLearn {
using namespace std;

// Direct lapack calls, type independent
inline void lapack_Xsyevx_(char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, double* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO)
{ dsyevx_(JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, WORK, LWORK, IWORK, IFAIL, INFO); }

inline void lapack_Xsyevx_(char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, float* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO)
{ ssyevx_(JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, WORK, LWORK, IWORK, IFAIL, INFO); }

inline void lapack_Xgesdd_(char* JOBZ, int* M, int* N, double* A, int* LDA, double* S, double* U, int* LDU, double* VT, int* LDVT, double* WORK, int* LWORK, int* IWORK, int* INFO)
{ dgesdd_(JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK, LWORK, IWORK, INFO); }

inline void lapack_Xgesdd_(char* JOBZ, int* M, int* N, float* A, int* LDA, float* S, float* U, int* LDU, float* VT, int* LDVT, float* WORK, int* LWORK, int* IWORK, int* INFO)
{ sgesdd_(JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK, LWORK, IWORK, INFO); }

inline void lapack_Xsyevr_(char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, int* ISUPPZ, float* WORK, int* LWORK, int* IWORK, int* LIWORK, int* INFO)
{ ssyevr_(JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, ISUPPZ, WORK, LWORK, IWORK, LIWORK, INFO); }

inline void lapack_Xsyevr_(char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, int* ISUPPZ, double* WORK, int* LWORK, int* IWORK, int* LIWORK, int* INFO)
{ dsyevr_(JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, ISUPPZ, WORK, LWORK, IWORK, LIWORK, INFO); }

inline void lapack_Xsygvx_(int* ITYPE, char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* B, int* LDB, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, double* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO)
{ dsygvx_(ITYPE, JOBZ, RANGE, UPLO, N, A, LDA, B, LDB, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, WORK, LWORK, IWORK, IFAIL, INFO); }

inline void lapack_Xsygvx_(int* ITYPE, char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* B, int* LDB, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, float* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO)
{ ssygvx_(ITYPE, JOBZ, RANGE, UPLO, N, A, LDA, B, LDB, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, WORK, LWORK, IWORK, IFAIL, INFO); }

// Cholesky decomposition
inline void lapack_Xpotrf_(char* UPLO, int* N, float* A, int* LDA, int* INFO)
{ spotrf_(UPLO, N, A, LDA, INFO); }

inline void lapack_Xpotrf_(char* UPLO, int* N, double* A, int* LDA, int* INFO)
{ dpotrf_(UPLO, N, A, LDA, INFO); }

// Solve linear system from Cholesky
inline void lapack_Xpotrs_(char* UPLO, int* N, int* NRHS, float* A, int* LDA, float* B, int* LDB, int* INFO)
{ spotrs_(UPLO, N, NRHS, A, LDA, B, LDB, INFO); }

inline void lapack_Xpotrs_(char* UPLO, int* N, int* NRHS, double* A, int* LDA, double* B, int* LDB, int* INFO)
{ dpotrs_(UPLO, N, NRHS, A, LDA, B, LDB, INFO); }

// Expert driver for factorising and solving through Cholesky (and estimate
// condition number, equilibrate, etc.)
inline void lapack_Xposvx_(
    char* FACT, char* UPLO, int* N, int* NRHS, float* A, int* LDA,
    float* AF, int* LDAF, char* EQUED, float* S, float* B, int* LDB,
    float* X, int* LDX, float* RCOND, float* FERR, float* BERR,
    float* WORK, int* IWORK, int* INFO)
{
    sposvx_(FACT, UPLO, N, NRHS, A, LDA, AF, LDAF, EQUED, S,
            B, LDB, X, LDX, RCOND, FERR, BERR, WORK, IWORK, INFO);
}

inline void lapack_Xposvx_(
    char* FACT, char* UPLO, int* N, int* NRHS, double* A, int* LDA,
    double* AF, int* LDAF, char* EQUED, double* S, double* B, int* LDB,
    double* X, int* LDX, double* RCOND, double* FERR, double* BERR,
    double* WORK, int* IWORK, int* INFO)
{
    dposvx_(FACT, UPLO, N, NRHS, A, LDA, AF, LDAF, EQUED, S,
            B, LDB, X, LDX, RCOND, FERR, BERR, WORK, IWORK, INFO);
}
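// Note: the lapack_X* overloads above exist so that templated code can call the
// appropriate single- or double-precision LAPACK routine without knowing the
// precision at the call site. A minimal illustrative sketch of the pattern
// (not part of the original header; choleskyInPlaceSketch is a hypothetical name):
//
//     template<class num_t>
//     int choleskyInPlaceSketch(TMat<num_t>& A)  // A symmetric positive definite
//     {
//         char uplo = 'U';   // which triangle LAPACK reads (LAPACK sees the column-major, i.e. transposed, view)
//         int n = A.length();
//         int lda = A.mod();
//         int info;
//         lapack_Xpotrf_(&uplo, &n, A.data(), &lda, &info);  // resolves to spotrf_ or dpotrf_
//         return info;       // 0 on success, as with the underlying LAPACK routine
//     }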
// (will work for float and double)
template<class num_t>
void lapackEIGEN(const TMat<num_t>& A, TVec<num_t>& eigenvals, TMat<num_t>& eigenvecs, char RANGE='A', num_t low=0, num_t high=0, num_t ABSTOL=0)
{

#ifdef BOUNDCHECK
    if(A.length()!=A.width())
        PLERROR("In lapackEIGEN length and width of A differ, it should be symmetric!");
#endif

    char JOBZ = eigenvecs.isEmpty() ?'N' :'V';
    char UPLO = 'U';
    int N = A.length();
    int LDA = A.mod();

    int IL=0, IU=0;
    num_t VL=0, VU=0;

    eigenvals.resize(N);
    int M; // The number of eigenvalues returned

    switch(RANGE)
    {
    case 'A':
        if(JOBZ=='V')
            eigenvecs.resize(N, N);
        break;
    case 'I':
        IL = int(low)+1; // +1 because fortran indexes start at 1
        IU = int(high)+1;
        if(JOBZ=='V')
            eigenvecs.resize(IU-IL+1, N);
        break;
    case 'V':
        VL = low;
        VU = high;
        if(JOBZ=='V')
            eigenvecs.resize(N, N);
        break;
    default:
        PLERROR("In lapackEIGEN: invalid RANGE character: %c",RANGE);
    }

    num_t* Z = 0;
    int LDZ = 1;
    if(eigenvecs.isNotEmpty())
    {
        Z = eigenvecs.data();
        LDZ = eigenvecs.mod();
    }

    // temporary work vectors
    static TVec<num_t> WORK;
    static TVec<int> IWORK;
    static TVec<int> IFAIL;

    WORK.resize(1);
    IWORK.resize(5*N);
    IFAIL.resize(N);

    int LWORK = -1;
    int INFO;

    // first call to find optimal work size
    // cerr << '(';
    lapack_Xsyevx_( &JOBZ, &RANGE, &UPLO, &N, A.data(), &LDA, &VL, &VU,
                    &IL, &IU, &ABSTOL, &M, eigenvals.data(), Z, &LDZ,
                    WORK.data(), &LWORK, IWORK.data(), IFAIL.data(), &INFO );
    // cerr << ')';

    if(INFO!=0)
        PLERROR("In lapackEIGEN, problem in first call to lapack_Xsyevx_ to get optimal work size, returned INFO = %d",INFO);

    // make sure we have enough space
    LWORK = (int) WORK[0]; // optimal size
    WORK.resize(LWORK);

    // second call to do the computation
    // cerr << '{';
    lapack_Xsyevx_( &JOBZ, &RANGE, &UPLO, &N, A.data(), &LDA, &VL, &VU,
                    &IL, &IU, &ABSTOL, &M, eigenvals.data(), Z, &LDZ,
                    WORK.data(), &LWORK, IWORK.data(), IFAIL.data(), &INFO );
    // cerr << '}';

    if(INFO!=0)
        PLERROR("In lapackEIGEN, problem when calling lapack_Xsyevx_ to perform computation, returned INFO = %d",INFO);

    eigenvals.resize(M);
    if(JOBZ=='V')
        eigenvecs.resize(M, N);
}
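// Example (illustrative sketch, not from the original header): computing the two
// largest eigenvalues/eigenvectors of a 3x3 symmetric matrix with lapackEIGEN.
// With RANGE='I', low and high are 0-based eigenvalue indices in ascending order
// (converted internally to 1-based Fortran indices), so indices width-2..width-1
// select the two largest. The contents of A are destroyed by the LAPACK call, and
// eigenvectors are returned as rows of eigenvecs. The matrix values are made up.
//
//     Mat A(3, 3);
//     A(0,0) = 2; A(0,1) = 1; A(0,2) = 0;
//     A(1,0) = 1; A(1,1) = 2; A(1,2) = 0;
//     A(2,0) = 0; A(2,1) = 0; A(2,2) = 3;
//     Vec eigenvals;
//     Mat eigenvecs(2, 3);               // non-empty => JOBZ='V' (eigenvectors requested)
//     lapackEIGEN(A, eigenvals, eigenvecs, 'I', real(1), real(2));
//     // eigenvals now holds the two largest eigenvalues, in ascending order.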
// (will work for float and double)
template<class num_t>
void lapackGeneralizedEIGEN(const TMat<num_t>& A, const TMat<num_t>& B, int ITYPE, TVec<num_t>& eigenvals, TMat<num_t>& eigenvecs, char RANGE='A', num_t low=0, num_t high=0, num_t ABSTOL=0)
{

#ifdef BOUNDCHECK
    if(A.length()!=A.width())
        PLERROR("In lapackGeneralizedEIGEN length and width of A differ, it should be symmetric!");
#endif

    char JOBZ = eigenvecs.isEmpty() ?'N' :'V';
    char UPLO = 'U';
    int N = A.length(); // The order of the matrix pencil (A,B)
    int LDA = A.mod();
    int LDB = B.mod();

    int IL=0, IU=0;
    num_t VL=0, VU=0;

    eigenvals.resize(N);
    int M; // The number of eigenvalues returned

    switch(RANGE)
    {
    case 'A':
        if(JOBZ=='V')
            eigenvecs.resize(N, N);
        break;
    case 'I':
        IL = int(low)+1; // +1 because fortran indexes start at 1
        IU = int(high)+1;
        if(JOBZ=='V')
            eigenvecs.resize(IU-IL+1, N);
        break;
    case 'V':
        VL = low;
        VU = high;
        if(JOBZ=='V')
            eigenvecs.resize(N, N);
        break;
    default:
        PLERROR("In lapackGeneralizedEIGEN: invalid RANGE character: %c",RANGE);
    }

    num_t* Z = 0;
    int LDZ = 1;
    if(eigenvecs.isNotEmpty())
    {
        Z = eigenvecs.data();
        LDZ = eigenvecs.mod();
    }

    // temporary work vectors
    static TVec<num_t> WORK;
    static TVec<int> IWORK;
    static TVec<int> IFAIL;

    WORK.resize(1);
    IWORK.resize(5*N);
    IFAIL.resize(N);

    int LWORK = -1;
    int INFO;

    // first call to find optimal work size
    // cerr << '(';
    lapack_Xsygvx_( &ITYPE, &JOBZ, &RANGE, &UPLO, &N, A.data(), &LDA, B.data(), &LDB, &VL, &VU,
                    &IL, &IU, &ABSTOL, &M, eigenvals.data(), Z, &LDZ,
                    WORK.data(), &LWORK, IWORK.data(), IFAIL.data(), &INFO );
    // cerr << ')';

    if(INFO!=0)
        PLERROR("In lapackGeneralizedEIGEN, problem in first call to lapack_Xsygvx_ to get optimal work size, returned INFO = %d",INFO);

    // make sure we have enough space
    LWORK = (int) WORK[0]; // optimal size
    WORK.resize(LWORK);

    // second call to do the computation
    // cerr << '{';
    lapack_Xsygvx_( &ITYPE, &JOBZ, &RANGE, &UPLO, &N, A.data(), &LDA, B.data(), &LDB, &VL, &VU,
                    &IL, &IU, &ABSTOL, &M, eigenvals.data(), Z, &LDZ,
                    WORK.data(), &LWORK, IWORK.data(), IFAIL.data(), &INFO );
    // cerr << '}';

    if(INFO!=0)
        PLERROR("In lapackGeneralizedEIGEN, problem when calling lapack_Xsygvx_ to perform computation, returned INFO = %d",INFO);

    eigenvals.resize(M);
    if(JOBZ=='V')
        eigenvecs.resize(M, N);
}

// (will work for float and double)
template<class num_t>
void eigenVecOfSymmMat(TMat<num_t>& m, int k, TVec<num_t>& eigen_values, TMat<num_t>& eigen_vectors, bool verbose=true)
{
    if (m.isEmpty()) {
        // Empty matrix: we just need to do some resizing.
        eigen_values.resize(0);
        eigen_vectors.resize(m.length(), m.width());
        return;
    }
    if (!m.isSymmetric()) {
        if (m.isSymmetric(false))
        {
            // Almost symmetric.
            if (verbose)
                PLWARNING("In eigenVecOfSymmMat - The matrix is only 'almost' symmetric, "
                          "it will be forced to be exactly symmetric");
        }
        else
            PLWARNING("In eigenVecOfSymmMat - The matrix is not symmetric, it will "
                      "be forced to be exactly symmetric by copying the top "
                      "right part to the bottom left part");
        fillItSymmetric(m);
    }
    eigen_vectors.resize(k,m.width());
    eigen_values.resize(k);
    // FASTER
    if(k>= m.width())
        lapackEIGEN(m, eigen_values, eigen_vectors, 'A',num_t(0),num_t(0));
    else
        lapackEIGEN(m, eigen_values, eigen_vectors, 'I', num_t(m.width()-k), num_t(m.width()-1));

    // put largest (rather than smallest) first!
    eigen_values.swap();
    eigen_vectors.swapUpsideDown();
}

// (will work for float and double)
template<class num_t>
void generalizedEigenVecOfSymmMat(TMat<num_t>& m1, TMat<num_t>& m2, int itype, int k, TVec<num_t>& eigen_values, TMat<num_t>& eigen_vectors)
{
    if(m1.length() != m2.length() || m1.width() != m2.width())
        PLERROR("In generalizedEigenVecOfSymmMat, m1 and m2 must have the same size");

    eigen_vectors.resize(k,m1.width());
    eigen_values.resize(k);
    // FASTER
    if(k>= m1.width())
        lapackGeneralizedEIGEN(m1, m2, itype, eigen_values, eigen_vectors, 'A',num_t(0),num_t(0));
    else
        lapackGeneralizedEIGEN(m1, m2, itype, eigen_values, eigen_vectors, 'I', num_t(m1.width()-k), num_t(m1.width()-1));

    // put largest (rather than smallest) first!
    eigen_values.swap();
    eigen_vectors.swapUpsideDown();
}
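// Example (illustrative sketch, not from the original header): getting the k
// largest eigenvalues and eigenvectors of a symmetric matrix. Unlike lapackEIGEN,
// eigenVecOfSymmMat returns them with the LARGEST eigenvalue first; the input
// matrix may be symmetrized and is modified by the underlying LAPACK call.
// The matrix values below are made up for illustration.
//
//     Mat m(3, 3);
//     m(0,0) = 2; m(0,1) = 1; m(0,2) = 0;
//     m(1,0) = 1; m(1,1) = 2; m(1,2) = 0;
//     m(2,0) = 0; m(2,1) = 0; m(2,2) = 3;
//     Vec eigen_values;
//     Mat eigen_vectors;
//     eigenVecOfSymmMat(m, 2, eigen_values, eigen_vectors);
//     // eigen_values[0] >= eigen_values[1]; eigen_vectors(i) is the row vector
//     // (length 3) associated with eigen_values[i].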
// (will work for float and double)
template<class num_t>
void lapackSVD(const TMat<num_t>& At, TMat<num_t>& Ut, TVec<num_t>& S, TMat<num_t>& V, char JOBZ='A', real safeguard = 1)
{
    int M = At.width();
    int N = At.length();
    int LDA = At.mod();
    int min_M_N = min(M,N);
    S.resize(min_M_N);

    switch(JOBZ)
    {
    case 'A':
        Ut.resize(M,M);
        V.resize(N,N);
        break;
    case 'S':
        Ut.resize(min_M_N, M);
        V.resize(N, min_M_N);
        break;
    case 'O':
        if(M<N)
            Ut.resize(M,M); // and V is not used
        else
            V.resize(N,N); // and Ut is not used
        break;
    case 'N':
        break;
    default:
        PLERROR("In lapackSVD, bad JOBZ argument : %c",JOBZ);
    }

    int LDU = 1;
    int LDVT = 1;
    num_t* U = 0;
    num_t* VT = 0;

    if(V.isNotEmpty())
    {
        LDVT = V.mod();
        VT = V.data();
    }
    if(Ut.isNotEmpty())
    {
        LDU = Ut.mod();
        U = Ut.data();
    }

    static TVec<num_t> WORK;
    WORK.resize(1);
    int LWORK = -1;

    static TVec<int> IWORK;
    IWORK.resize(8*min_M_N);

    int INFO;

    // first call to find optimal work size
    lapack_Xgesdd_(&JOBZ, &M, &N, At.data(), &LDA, S.data(), U, &LDU, VT, &LDVT, WORK.data(), &LWORK, IWORK.data(), &INFO);

    if(INFO!=0)
        PLERROR("In lapackSVD, problem in first call to sgesdd_ to get optimal work size, returned INFO = %d",INFO);

    // make sure we have enough space
    LWORK = int(WORK[0] * safeguard + 0.5); // optimal size (safeguard may be used to make sure it doesn't crash in some rare occasions).
    WORK.resize(LWORK);
    // cerr << "Optimal WORK size: " << LWORK << endl;

    // second call to do the computation
    lapack_Xgesdd_(&JOBZ, &M, &N, At.data(), &LDA, S.data(), U, &LDU, VT, &LDVT, WORK.data(), &LWORK, IWORK.data(), &INFO );

    if(INFO!=0)
    {
        // cerr << At << endl;
        // cerr << "In lapackSVD, failed with INFO = " << INFO << endl;
        PLERROR("In lapackSVD, problem when calling sgesdd_ to perform computation, returned INFO = %d",INFO);
    }
}

// (will work for float and double)
template<class num_t>
inline void SVD(const TMat<num_t>& A, TMat<num_t>& U, TVec<num_t>& S, TMat<num_t>& Vt, char JOBZ='A', real safeguard = 1)
{
    // A = U.S.Vt -> At = V.S.Ut
    PLASSERT( !A.hasMissing() );
    lapackSVD(A,Vt,S,U,JOBZ, safeguard);
}
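// Example (illustrative sketch, not from the original header): full SVD of a
// matrix through the SVD wrapper, which exploits A = U.S.Vt <=> At = V.S.Ut so
// that a row-major TMat can be handed to the column-major LAPACK routine without
// an explicit transpose. The contents of A are destroyed by the call.
//
//     Mat A(4, 3);
//     // ... fill A ...
//     Mat U, Vt;
//     Vec S;
//     SVD(A, U, S, Vt, 'A');
//     // U is 4x4, S holds the 3 singular values (largest first), Vt is 3x3;
//     // the original A equals U . diag(S) . Vt up to numerical precision.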
// DO NOT USE!
// eigen_SymmMat is deprecated: use eigenVecOfSymmMat or lapackEIGEN instead
int eigen_SymmMat(Mat& in, Vec& e_value, Mat& e_vector, int& n_evalues_found,
                  bool compute_all, int nb_eigen, bool compute_vectors = true,
                  bool largest_evalues=true);

// DO NOT USE!
// eigen_SymmMat_decreasing is deprecated: use eigenVecOfSymmMat or lapackEIGEN instead
int eigen_SymmMat_decreasing(Mat& in, Vec& e_value, Mat& e_vector, int& n_evalues_found,
                             bool compute_all, int nb_eigen, bool compute_vectors = true,
                             bool largest_evalues=true);

int matInvert(Mat& in, Mat& inverse);

Mat multivariate_normal(const Vec& mu, const Mat& A, int N);

Vec multivariate_normal(const Vec& mu, const Mat& A);

Vec multivariate_normal(const Vec& mu, const Vec& e_values, const Mat& e_vectors);

void multivariate_normal(Vec& x, const Vec& mu, const Vec& e_values, const Mat& e_vectors, Vec& z);

int lapackSolveLinearSystem(Mat& At, Mat& Bt, TVec<int>& pivots);

Mat solveLinearSystem(const Mat& A, const Mat& B);

Vec solveLinearSystem(const Mat& A, const Vec& b);

void solveLinearSystem(const Mat& A, const Mat& Y, Mat& X);

void solveTransposeLinearSystem(const Mat& A, const Mat& Y, Mat& X);

Vec constrainedLinearRegression(const Mat& Xt, const Vec& Y, real lambda=0.);


void lapackCholeskyDecompositionInPlace(Mat& A, char uplo='L');

void lapackCholeskySolveInPlace(Mat& A, Mat& B, bool B_is_column_major=false,
                                char uplo='L');

real GCV(Mat X, Mat Y, real weight_decay, bool X_is_transposed=false, Mat* W=0);

real GCVfromSVD(real n, real Y2minusZ2, Vec Z, Vec s);

real ridgeRegressionByGCV(Mat X, Mat Y, Mat W, real& best_GCV, bool X_is_transposed=false,
                          real initial_weight_decay_guess=-1, int explore_threshold=5, real min_weight_decay=0);


real weightedRidgeRegressionByGCV(Mat X, Mat Y, Vec gamma, Mat W, real& best_gcv, real min_weight_decay=0);


// COMMENTED OUT BECAUSE INCORRECT COMPUTATION OF GCV: CORRECT COMPUTATION DONE WITH ABOVE ROUTINES
#if 0
real generalizedCVRidgeRegression(Mat inputs, Mat targets, real& best_LOOMSE, Mat* best_weights=0, Mat* best_predictions=0, bool inputs_are_transposed=false, real initial_weight_decay_guess=-1., int explore_threshold = 5);

real LOOMSEofRidgeRegression(Mat inputs, Mat targets, Mat weights, real weight_decay, Vec eigenvalues, Mat eigenvectors, Mat predictions, Mat RHS_matrix, bool inputs_are_transposed);
#endif

// Return the affine transformation that
// is such that the transformed data has
// zero mean and identity covariance.
// The input data matrix is (n x d).
// The results are the matrix W and the bias vector
// such that the
//   transformed_row = W row + bias
// have the desired properties.
// Note that W is obtained by doing an eigen-decomposition
// of the data covariance matrix. In case this matrix
// is ill-conditioned, the user can add a regularization
// term on its diagonal before the inversion, by providing
// a regularizer>0 as argument.
void affineNormalization(Mat data, Mat W, Vec bias, real regularizer=0);

inline Vec closestPointOnHyperplane(const Vec& x, const Mat& points, real weight_decay = 0.)
{ return transposeProduct(points, constrainedLinearRegression(points,x,weight_decay)); }

inline real hyperplaneDistance(const Vec& x, const Mat& points, real weight_decay = 0.)
{ return L2distance(x, closestPointOnHyperplane(x,points,weight_decay)); }
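// Example (illustrative sketch, not from the original header): solving a square
// linear system A x = b with the solveLinearSystem overload declared above. The
// values are made up for illustration.
//
//     Mat A(2, 2);
//     A(0,0) = 3; A(0,1) = 1;
//     A(1,0) = 1; A(1,1) = 2;
//     Vec b(2);
//     b[0] = 9; b[1] = 8;
//     Vec x = solveLinearSystem(A, b);   // expected solution: x = (2, 3)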
template<class MatT>
void diagonalizeSubspace(MatT& A, Mat& X, Vec& Ax,
                         Mat& solutions, Vec& evalues, Mat& evectors)
{
    int n_try=X.length();

    n_try=GramSchmidtOrthogonalization(X);
    X = X.subMatRows(0,n_try);

    int n_soln=solutions.length();

    Mat C(n_try,n_try);
    for (int i=0;i<n_try;i++)
    {
        real* Ci = C[i];
        Vec x_i=X(i);
        A.product(x_i,Ax);
        for (int j=0;j<=i;j++)
            Ci[j] = dot(X(j),Ax);
    }
    for (int i=0;i<n_try;i++)
    {
        real* Ci = C[i];
        for (int j=i+1;j<n_try;j++)
            Ci[j] = C(j,i);
    }

    int n_evalues_found=0;
    Mat CC=C.copy();
    eigen_SymmMat(CC,evalues,evectors,n_evalues_found,true,n_try,true,true);
#if 0
    Vec Cv(n_try);
    for (int i=0;i<n_try;i++)
    {
        Vec vi=evectors(i);
        if (fabs(norm(vi)-1)>1e-5)
            cout << "norm v[" << i << "] = " << norm(vi) << endl;
        product(C, vi,Cv);
        real ncv=norm(Cv);
        if (fabs(ncv-evalues[i])>1e-5)
            cout << "C v[" << i << "] = " << ncv << " but evalue = " << evalues[i] << endl;
        real vcv = dot(vi,Cv);
        if (fabs(vcv-evalues[i])>1e-5)
            cout << "v' C v[" << i << "] = " << vcv << " but evalue = " << evalues[i] << endl;
        for (int j=0;j<i;j++)
        {
            Vec vj=evectors(j);
            real dij = dot(vi,vj);
            if (fabs(dij)>1e-5)
                cout << "v[" << i << "] . v[" << j << "] = " << dij << endl;
        }
    }
#endif

    for (int i=0;i<n_soln;i++)
    {
        Vec xi=solutions(i);
        xi.clear();
        for (int j=0;j<n_try;j++)
            multiplyAcc(xi, X(j),evectors(i,j));
    }
#if 0
    for (int i=0;i<n_soln;i++)
    {
        Vec xi=solutions(i);
        real normxi=norm(xi);
        if (fabs(normxi-1)>1e-5)
            cout << "norm x[" << i << "]=" << normxi << endl;
        product(A, xi,Ax);
        cout << "Ax[" << i << "]=" << norm(Ax) << endl;
        real xAx = dot(Ax,xi);
        real err=fabs(xAx-evalues[i]);
        if (err>1e-5)
            cout << "xAx [" << i << "]=" << xAx << " but evalue = " << evalues[i] << endl;
        for (int j=0;j<i;j++)
        {
            Vec xj=solutions(j);
            err = fabs(dot(xi,xj));
            if (err>1e-5)
                cout << "|x[" << i << "] . x[" << j << "]| = " << err << endl;
        }
    }
#endif
}

} // end of namespace PLearn


#endif //!< plapack_h


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :