// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: plapack.cc 9143 2008-06-18 19:43:40Z nouiz $
 * This file is part of the PLearn library.
 ******************************************************* */

#include <cstdlib>
#include "plapack.h"
#include <algorithm>
#include "random.h"

namespace PLearn {
using namespace std;

int eigen_SymmMat(Mat& in, Vec& e_value, Mat& e_vector, int& n_evalues_found,
                  bool compute_all, int nb_eigen, bool compute_vectors, bool largest_evalues)
{
    PLWARNING("eigen_SymmMat is deprecated: use eigenVecOfSymmMat or lapackEIGEN instead");

#ifndef USE_BLAS_SPECIALISATIONS
    PLERROR("eigen_SymmMat: LAPACK not available on this system!");
    return 0;
#else
    if (!in.isSymmetric())
        PLERROR("eigen_SymmMat: Your input matrix is not symmetric\n");

    // some checks
    if (nb_eigen < 1 || nb_eigen > in.length())
        PLERROR("The number of desired eigenvalues (%d) must be in range [1,%d]", nb_eigen, in.length());

    if (compute_all)
    {
        if (e_vector.length() != in.length() || e_vector.width() != in.width())
            e_vector.resize(in.length(), in.width());
        if (in.length() != e_value.length())
            e_value.resize(in.length());
    }
    else
    {
        if (e_vector.length() != nb_eigen || e_vector.width() != in.width())
            e_vector.resize(nb_eigen, in.width());
        if (nb_eigen != e_value.length())
            e_value.resize(nb_eigen);
    }

    // for the moment, we do not accept sub-matrices...
    if (in.mod() != in.width())
        PLERROR("The input matrix cannot be a sub-matrix...");

    // we set the parameters to call the LAPACK Fortran function:
    // if compute_all==true, we call <ssyev>
    // if compute_all==false, we call <ssyevx>

    int INFO = 1;
    if (compute_all)
    {
        char JOBZ;
        if (compute_vectors)
            JOBZ = 'V';
        else
            JOBZ = 'N';
        char UPLO = 'U';
        int N = in.length();
        real* A = in.data();
        int LDA = N;
        real* W = new real[N];
        int LWORK = 3*N;
        real* WORK = new real[LWORK];

        // we now call the LAPACK Fortran function <ssyev>
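        // (Note: LAPACK requires LWORK >= max(1, 3*N-1) for [sd]syev, so
        // LWORK = 3*N above meets the minimum. PLearn's 'real' type is float
        // or double depending on USEFLOAT/USEDOUBLE, hence the dispatch below.)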
#ifdef USEFLOAT
        ssyev_(&JOBZ, &UPLO, &N, A, &LDA, W, WORK, &LWORK, &INFO);
#endif
#ifdef USEDOUBLE
        dsyev_(&JOBZ, &UPLO, &N, A, &LDA, W, WORK, &LWORK, &INFO);
#endif

        if (INFO != 0)
        {
            PLWARNING("eigen_SymmMat: something went wrong in ssyev. Error code %d", INFO);
            n_evalues_found = 0;
        }
        else
        {
            n_evalues_found = N;
            for (int i=0; i<N; i++)
                e_value[i] = W[i];

            if (compute_vectors)
            {
                real* p_evector = e_vector.data();
                real* p_a = A;
                for (int i=0; i<N; i++)
                    for (int j=0; j<N; j++, p_evector++, p_a++)
                        *p_evector = *p_a;
            }
        }
        delete[] W;
        delete[] WORK;
    }
    else
    {
        char JOBZ;
        if (compute_vectors)
            JOBZ = 'V';
        else
            JOBZ = 'N';
        char RANGE = 'I';
        char UPLO = 'U';
        int N = in.length();
        real* A = in.data();
        int LDA = N;
        real VL, VU; // not referenced
        int IL, IU;
        if (largest_evalues)
        {
            IL = N - nb_eigen + 1;
            IU = N;
        }
        else
        {
            IL = 1;
            IU = nb_eigen;
        }
        real ABSTOL = 1e-10;
        int M;
        real* W = new real[N];
        int LDZ = N;
        real* Z = new real[LDZ*nb_eigen];
        int LWORK = 8*N;
        real* WORK = new real[LWORK];
        int* IWORK = new int[5*N];
        int* IFAIL = new int[N];

        // we now call the LAPACK Fortran function <ssyevx>
        lapack_Xsyevx_(&JOBZ, &RANGE, &UPLO, &N, A, &LDA, &VL, &VU, &IL, &IU, &ABSTOL, &M, W, Z, &LDZ, WORK, &LWORK, IWORK, IFAIL, &INFO);

        n_evalues_found = M;
        if (M != nb_eigen)
            cout << "eigen_SymmMat: something went wrong in ssyevx." << endl
                 << "The number of eigenvalues found (" << M
                 << ") differs from the number requested (" << nb_eigen << ")." << endl;

        if (INFO != 0)
        {
            // cout << "eigen_SymmMat: something went wrong in ssyevx. Error code "
            //      << INFO << endl << "See the man page of ssyevx for more details"
            //      << endl;
        }
        else
        {
            for (int i=0; i<M; i++)
                e_value[i] = W[i];

            if (compute_vectors)
            {
                real* p_evector = e_vector.data();
                real* p_z = Z;
                for (int i=0; i<M; i++)
                    for (int j=0; j<N; j++, p_evector++, p_z++)
                        *p_evector = *p_z;
            }
        }
        delete[] W;
        delete[] Z;
        delete[] WORK;
        delete[] IWORK;
        delete[] IFAIL;
    }
    return INFO;
#endif
}

int eigen_SymmMat_decreasing(Mat& in, Vec& e_value, Mat& e_vector, int& n_evalues_found,
                             bool compute_all, int nb_eigen, bool compute_vectors, bool largest_evalues)
{
    PLWARNING("eigen_SymmMat_decreasing is deprecated: use eigenVecOfSymmMat or lapackEIGEN instead");

    int res = eigen_SymmMat(in, e_value, e_vector, n_evalues_found,
                            compute_all, nb_eigen, compute_vectors, largest_evalues);
    e_value.swap();
    e_vector.swapUpsideDown();
    return res;
}

///////////////
// matInvert //
///////////////
int matInvert(Mat& in, Mat& inverse)
{
    // If the matrix is empty, just do nothing instead of crashing.
    if (in.isEmpty()) {
        PLASSERT( inverse.isEmpty() );
        return 0;
    }

#ifndef USE_BLAS_SPECIALISATIONS
    PLERROR("matInvert: LAPACK not available on this system!");
    return 0;
#else
    // PLWARNING("matInvert: Your input matrix will be over-written!");

    // some checks
    if (in.length() != in.width())
        PLERROR("The input matrix [%dx%d] must be square!", in.length(), in.width());
    // for the moment, we do not accept sub-matrices...
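    // (The calls below pass in.data() to LAPACK with the leading dimension
    // set to the matrix width, which is only valid when rows are contiguous
    // in memory, i.e. when mod() == width().)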
    if (in.mod() != in.width())
        PLERROR("The input matrix cannot be a sub-matrix...");

    int M = in.length();
    int N = in.length();
    real* A = in.data();
    int LDA = N;
    int* IPIV = new int[N];
    int INFO;

#ifdef USEFLOAT
    sgetrf_(&M, &N, A, &LDA, IPIV, &INFO);
#endif
#ifdef USEDOUBLE
    dgetrf_(&M, &N, A, &LDA, IPIV, &INFO);
#endif

    if (INFO != 0)
    {
        cout << "In matInvert: Error doing the inversion." << endl
             << "Check the man page of <sgetrf> with error code " << INFO
             << " for more details." << endl;

        delete[] IPIV;
        return INFO;
    }

    int LWORK = N;
    real* WORK = new real[LWORK];

#ifdef USEFLOAT
    sgetri_(&N, A, &LDA, IPIV, WORK, &LWORK, &INFO);
#endif
#ifdef USEDOUBLE
    dgetri_(&N, A, &LDA, IPIV, WORK, &LWORK, &INFO);
#endif

    if (INFO != 0)
    {
        cout << "In matInvert: Error doing the inversion." << endl
             << "Check the man page of <sgetri> with error code " << INFO
             << " for more details." << endl;

        delete[] IPIV;
        delete[] WORK;
        return INFO;
    }

    delete[] IPIV;
    delete[] WORK;

    real* p_A = A;
    for (int i=0; i<N; i++) {
        real* p_inverse = inverse[i];
        for (int j=0; j<M; j++, p_inverse++, p_A++)
            *p_inverse = *p_A;
    }

    return INFO;
#endif
}


int lapackSolveLinearSystem(Mat& At, Mat& Bt, TVec<int>& pivots)
{
#ifdef BOUNDCHECK
    if(At.width() != Bt.width())
        PLERROR("In lapackSolveLinearSystem: Incompatible dimensions");
#endif

    int INFO = 0;
#ifndef USE_BLAS_SPECIALISATIONS
    PLERROR("lapackSolveLinearSystem: can't be called unless PLearn is linked with LAPACK");
#else
    int N = At.width();
    int NRHS = Bt.length();
    real* Aptr = At.data();
    int LDA = At.mod();
    if(pivots.length()!=N)
        pivots.resize(N);
    int* IPIVptr = pivots.data();
    real* Bptr = Bt.data();
    int LDB = Bt.mod();
#ifdef USEFLOAT
    sgesv_(&N, &NRHS, Aptr, &LDA, IPIVptr, Bptr, &LDB, &INFO);
#endif
#ifdef USEDOUBLE
    dgesv_(&N, &NRHS, Aptr, &LDA, IPIVptr, Bptr, &LDB, &INFO);
#endif
#endif
    return INFO;
}
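// A minimal usage sketch for lapackSolveLinearSystem (illustrative only, not
// part of the library API): both arguments are passed transposed because
// LAPACK is column-major, both are overwritten in place, and the solution
// comes back in Bt.
//
//     Mat A(3, 3), B(3, 2);
//     // ... fill A and B ...
//     Mat At = transpose(A);
//     Mat Bt = transpose(B);
//     TVec<int> pivots(At.width());
//     int info = lapackSolveLinearSystem(At, Bt, pivots);
//     Mat X = transpose(Bt);  // if info == 0, X solves A X = B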
// for matrices A such that A.length() <= A.width(),
// find X s.t. A X = Y
void solveLinearSystem(const Mat& A, const Mat& Y, Mat& X)
{
    PLERROR("solveLinearSystem: not implemented yet");
}

// for matrices A such that A.length() >= A.width(),
// find X s.t. X A = Y
void solveTransposeLinearSystem(const Mat& A, const Mat& Y, Mat& X)
{
    PLERROR("solveTransposeLinearSystem: not implemented yet");
}

Mat solveLinearSystem(const Mat& A, const Mat& B)
{
    Mat Bt = transpose(B);
    Mat At = transpose(A);
    TVec<int> pivots(A.length());
    int status = lapackSolveLinearSystem(At,Bt,pivots);
    if(status<0)
        PLERROR("Illegal value in argument of lapackSolveLinearSystem");
    else if(status>0)
        PLERROR("In solveLinearSystem: The factorization has been completed, but the factor U is exactly singular, so the solution could not be computed.");
    return transpose(Bt); // return X
}

Vec solveLinearSystem(const Mat& A, const Vec& b)
{ return solveLinearSystem(A, b.toMat(b.length(),1)).toVec(); }


/*
real hyperplane_distance(Vec x, Mat points)
{
    if(x.length()!=points.width())
        PLERROR("In hyperplane_distance, incompatible dimensions");
    Vec ref = points(0);
    Mat tangentvecs = points.subMatRows(1,points.length()-1).copy();
    tangentvecs -= ref;
    Mat A = productTranspose(tangentvecs,tangentvecs);
    Vec b = product(tangentvecs,x-ref);
    Vec alpha(tangentvecs.length());
    Mat alphamat(alpha.length(),1,alpha);
    solveLinearSystemByCholesky(A,Mat(b.length(),1,b),alphamat);
    return norm(ref + transposeProduct(tangentvecs,alpha) - x);
}
*/

// Returns w that minimizes ||X.w - Y||^2 + lambda.||w||^2
// under the constraint \sum_i w_i = 1.
// Xt is the transpose of the input matrix X; Y is the target vector.
// This doesn't include any bias term.
Vec constrainedLinearRegression(const Mat& Xt, const Vec& Y, real lambda)
{
    if(Y.length()!=Xt.width())
        PLERROR("In constrainedLinearRegression, incompatible dimensions");

    // The Lagrangian of this constrained problem leads to the linear system
    //   [ X'X + lambda I   1/2 ] [ w ]   [ X'Y ]
    //   [    (1/2) 1'       0  ] [ l ] = [ 1/2 ]
    // (1 being the all-ones vector), whose first n components give w.
    int n = Xt.length();
    Mat A(n+1,n+1);
    Vec b(n+1);

    for(int i=0; i<n; i++)
    {
        A(n,i) = 0.5;
        A(i,n) = 0.5;
        b[i] = dot(Y, Xt(i));
        for(int j=0; j<n; j++)
        {
            real dotprod = dot(Xt(i), Xt(j));
            if(i!=j)
                A(i,j) = dotprod;
            else
                A(i,j) = dotprod + lambda;
        }
    }
    A(n,n) = 0.;
    b[n] = 0.5;

    // cerr << "A = " << A << endl;
    // cerr << "b = " << b << endl;
    // cerr << "b\\A = " << solveLinearSystem(A,b) << endl;

    Vec w_and_l = solveLinearSystem(A, b);
    return w_and_l.subVec(0,n); // return w
}


//##### lapackCholeskyDecompositionInPlace ##################################

void lapackCholeskyDecompositionInPlace(Mat& A, char uplo)
{
    if (A.width() == 0 || A.length() == 0)
        return;
    if (A.mod() != A.width())
        PLERROR("lapackCholeskyDecompositionInPlace: matrix mod (%d) must equal "
                "its width (%d)", A.mod(), A.width());
    if (A.width() != A.length())
        PLERROR("lapackCholeskyDecompositionInPlace: matrix width (%d) and height (%d) "
                "must be equal", A.width(), A.length());

    char lapack_uplo;
    switch (uplo) {
    case 'L':
    case 'l':
        lapack_uplo = 'U';
        break;

    case 'U':
    case 'u':
        lapack_uplo = 'L';
        break;

    default:
        PLERROR("lapackCholeskyDecompositionInPlace: unrecognized character '%c' for "
                "argument 'uplo'; valid characters are 'U' and 'L'", uplo);
    }

    real* data = A.data();
    int N = A.width();
    int INFO;
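    // Note on the 'uplo' flip above: PLearn matrices are row-major while
    // LAPACK assumes column-major storage, so LAPACK effectively sees A
    // transposed; requesting the opposite triangle compensates for this.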
    // call LAPACK
    lapack_Xpotrf_(&lapack_uplo, &N, data, &N, &INFO);

    if (INFO == 0)
        return; // all successful
    else if (INFO < 0)
        PLERROR("lapackCholeskyDecompositionInPlace: implementation error; argument %d "
                "to xPOTRF had an illegal value", -INFO);
    else
        PLERROR("lapackCholeskyDecompositionInPlace: error in decomposition; "
                "leading minor of order %d is not positive definite, "
                "and the factorization could not be completed.", INFO);
}


//##### lapackCholeskySolveInPlace ##########################################

void lapackCholeskySolveInPlace(Mat& A, Mat& B, bool B_is_column_major, char uplo)
{
    if (A.width() == 0 || A.length() == 0 || B.width() == 0 || B.length() == 0)
        return;
    if (A.mod() != A.width())
        PLERROR("lapackCholeskySolveInPlace: matrix A mod (%d) must equal "
                "its width (%d)", A.mod(), A.width());
    if (B.mod() != B.width())
        PLERROR("lapackCholeskySolveInPlace: matrix B mod (%d) must equal "
                "its width (%d)", B.mod(), B.width());
    if (A.width() != A.length())
        PLERROR("lapackCholeskySolveInPlace: matrix width (%d) and height (%d) "
                "must be equal", A.width(), A.length());
    if ((! B_is_column_major && B.length() != A.length()) ||
        (  B_is_column_major && B.width()  != A.length()) )
        PLERROR("lapackCholeskySolveInPlace: matrix B length (%d) is "
                "incompatible with the dimensions of A (%d)",
                (B_is_column_major? B.width() : B.length()), A.length());

    char lapack_uplo;
    switch (uplo) {
    case 'L':
    case 'l':
        lapack_uplo = 'U';
        break;

    case 'U':
    case 'u':
        lapack_uplo = 'L';
        break;

    default:
        PLERROR("lapackCholeskySolveInPlace: unrecognized character '%c' for "
                "argument 'uplo'; valid characters are 'U' and 'L'", uplo);
    }

    // If B is not column-major, transpose it
    Mat lapack_B;
    if (! B_is_column_major)
        lapack_B = transpose(B);
    else
        lapack_B = B;

    // Prepare for call to LAPACK
    int N = A.width();
    int NRHS = lapack_B.length(); // Don't forget it's transposed for lapack
    int LDA = A.length();
    int LDB = lapack_B.width();
    int INFO;
    real* A_data = A.data();
    real* B_data = lapack_B.data();

    // Call LAPACK
    lapack_Xpotrs_(&lapack_uplo, &N, &NRHS, A_data, &LDA, B_data, &LDB, &INFO);

    if (INFO < 0)
        PLERROR("lapackCholeskySolveInPlace: implementation error; argument %d "
                "to xPOTRS had an illegal value", -INFO);
    PLASSERT( INFO == 0 );

    // If B was not originally column-major, transpose back result from LAPACK
    if (! B_is_column_major)
        transpose(lapack_B, B);
}
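// A minimal usage sketch for the two Cholesky routines above (illustrative
// only, not part of the library; assumes A is symmetric positive-definite
// and passes all arguments explicitly rather than relying on defaults):
//
//     Mat A = ...;                                  // n x n, SPD
//     Mat B = ...;                                  // n x k right-hand sides
//     lapackCholeskyDecompositionInPlace(A, 'L');   // A now holds its Cholesky factor
//     lapackCholeskySolveInPlace(A, B, false, 'L'); // B is overwritten with X solving A X = B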
Mat multivariate_normal(const Vec& mu, const Mat& A, int N)
{
    Vec e_values;
    Mat e_vectors;
    Mat A_copy = A.copy();
    int nb_evalues_found;
    eigen_SymmMat(A_copy, e_values, e_vectors, nb_evalues_found, true, mu.length(), true);
    Mat samples(0, mu.length());
    for (int i = 0; i < N; i++)
        samples.appendRow(multivariate_normal(mu, e_values, e_vectors));
    return samples;
}

Vec multivariate_normal(const Vec& mu, const Mat& A)
{
    return multivariate_normal(mu, A, 1).toVec();
}

Vec multivariate_normal(const Vec& mu, const Vec& e_values, const Mat& e_vectors)
{
    int n = mu.length(); // the number of dimensions
    Vec z(n), x(n);
    for (int i = 0; i < n; i++)
        z[i] = gaussian_01();
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
            x[i] += e_vectors[j][i] * sqrt(e_values[j]) * z[j];
        x[i] += mu[i];
    }
    return x;
}

void multivariate_normal(Vec& x, const Vec& mu, const Vec& e_values, const Mat& e_vectors, Vec& z)
{
    int n = mu.length(); // the number of dimensions
    z.resize(n);
    x.resize(n);
    x.clear();
    for (int i = 0; i < n; i++)
        z[i] = gaussian_01();
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
            x[i] += e_vectors[j][i] * sqrt(e_values[j]) * z[j];
        x[i] += mu[i];
    }
}

void affineNormalization(Mat data, Mat W, Vec bias, real regularizer)
{
    int d = data.width();
    Vec& mu = bias;
    Mat covar(d,d);
    computeMeanAndCovar(data, mu, covar);
    Vec evalues(d);
    if (!fast_exact_is_equal(regularizer, 0))
        for (int i=0;i<d;i++)
            covar(i,i) += regularizer;
    int nev=0;
    eigen_SymmMat(covar, evalues, W, nev, true, d, true, true);
    for (int i=0;i<d;i++)
        W(i) *= real(1.0 / sqrt(evalues[i]));
    mu *= - real(1.0); // bias = -mu
}

// COMMENTED OUT BECAUSE OF INCORRECT COMPUTATION OF GCV
#if 0

real generalizedCVRidgeRegression(Mat inputs, Mat targets, real& best_LOOMSE, Mat* best_weights, Mat* best_predictions, bool inputs_are_transposed, real initial_weight_decay_guess, int explore_threshold)
{
    static Mat inputs_copy, U, Vt, predictions, RHS_matrix, weights, XY;
    static Vec singular_values, eigen_values, LOOMSE;
    int n_examples = inputs_are_transposed?inputs.width():inputs.length();
    int n_inputs = inputs_are_transposed?inputs.length():inputs.width();
    int n_outputs = targets.width();
    if (targets.length()!=n_examples)
        PLERROR("generalizedCVRidgeRegression(Mat inputs, Mat targets, Mat weights): targets length (%d) incompatible with inputs length (%d)\n",
                targets.length(), n_examples);
    if (best_weights && (best_weights->length()!=n_outputs || best_weights->width()!=n_inputs))
        PLERROR("generalizedCVRidgeRegression(Mat inputs, Mat targets, Mat weights): weights matrix dimensions were (%d,%d), expected (%d,%d)\n",
                best_weights->length(), best_weights->width(), n_outputs, n_inputs);

    inputs_copy.resize(n_examples,n_inputs);
    predictions.resize(n_examples,n_outputs);
    weights.resize(n_outputs,n_inputs);
    int rank = min(n_examples,n_inputs);
    U.resize(n_examples,rank);
    Vt.resize(rank,n_inputs);
    XY.resize(n_inputs,n_outputs);
    RHS_matrix.resize(rank,n_outputs);
    singular_values.resize(rank);
    eigen_values.resize(rank);
    LOOMSE.resize(rank);
    LOOMSE.fill(-1.);
    if (inputs_are_transposed)
        transpose(inputs, inputs_copy);
    else
        inputs_copy << inputs;

    transposeProduct(XY,inputs_copy,targets);

    // the computational cost of the SVD is O(rank^3)
    SVD(inputs_copy,U,singular_values,Vt,'S');

    product(RHS_matrix,Vt,XY);

    real trace_of_design_matrix = 0;
    for (int i=0;i<rank;i++)
    {
        eigen_values[i] = singular_values[i]*singular_values[i];
        trace_of_design_matrix += eigen_values[i];
    }

    // search among cut-off eigen-values
    best_LOOMSE = 1e38;
    real best_weight_decay = 0;
    if (initial_weight_decay_guess<0) // TRY ALL EIGENVALUES
        for (int i=1;i<rank;i++)
        {
            real weight_decay = exp(0.5*(pl_log(eigen_values[i-1])+pl_log(eigen_values[i])));
            LOOMSE[i] = LOOMSEofRidgeRegression(inputs,targets,weights,weight_decay,eigen_values,Vt,
                                                predictions, RHS_matrix,inputs_are_transposed);
            if (LOOMSE[i]<best_LOOMSE)
            {
                best_LOOMSE=LOOMSE[i];
                best_weight_decay = weight_decay;
                if (best_predictions)
                    *best_predictions << predictions;
                if (best_weights)
                    *best_weights << weights;
            }
        }
    else // BE MORE GREEDY: DO A SEARCH FROM INITIAL GUESS
    {
        // first find eigenvalue closest to initial guess
        Vec weight_decays(rank);
        weight_decays[0] = eigen_values[0];
        for (int i=1;i<rank;i++)
            weight_decays[i] = exp(0.5*(pl_log(eigen_values[i-1])+pl_log(eigen_values[i])));
        int closest = 0;
        real eval_dist = fabs(weight_decays[0]-initial_weight_decay_guess);
        for (int i=1;i<rank;i++)
        {
            real dist = fabs(weight_decays[i]-initial_weight_decay_guess);
            if (dist < eval_dist)
            {
                eval_dist = dist;
                closest = i;
            }
        }
        // how well are we doing there?
        best_weight_decay = weight_decays[closest];
        int best_i = closest;
        best_LOOMSE = LOOMSE[closest] = LOOMSEofRidgeRegression(inputs,targets,weights,weight_decays[closest],eigen_values,Vt,
                                                                predictions, RHS_matrix,inputs_are_transposed);
        if (best_predictions)
            *best_predictions << predictions;
        if (best_weights)
            *best_weights << weights;
        // then explore around it, first one way, then the other, until it looks like we can't get better
        int left=closest;
        int right=closest;
        if (right<rank-1)
            right++;
        else
            left--;
        while (left>=0 || right<rank)
        {
            bool improved = false;
            if (LOOMSE[left]<0)
            {
                LOOMSE[left] = LOOMSEofRidgeRegression(inputs,targets,weights,weight_decays[left],eigen_values,Vt,
                                                       predictions, RHS_matrix,inputs_are_transposed);
                if (LOOMSE[left]<best_LOOMSE)
                {
                    best_LOOMSE=LOOMSE[left];
                    best_weight_decay = weight_decays[left];
                    best_i = left;
                    if (best_predictions)
                        *best_predictions << predictions;
                    if (best_weights)
                        *best_weights << weights;
                    if (left>0)
                    {
                        left--;
                        improved = true;
                    }
                }
            }
            if (LOOMSE[right]<0)
            {
                LOOMSE[right] = LOOMSEofRidgeRegression(inputs,targets,weights,weight_decays[right],eigen_values,Vt,
                                                        predictions, RHS_matrix,inputs_are_transposed);
                if (LOOMSE[right]<best_LOOMSE)
                {
                    best_LOOMSE=LOOMSE[right];
                    best_weight_decay = weight_decays[right];
                    best_i = right;
                    if (best_predictions)
                        *best_predictions << predictions;
                    if (best_weights)
                        *best_weights << weights;
                    if (right<rank-1)
                    {
                        right++;
                        improved = true;
                    }
                }
            }
            if (!improved)
            {
                if (best_i - left < right - best_i)
                {
                    if (best_i - left < explore_threshold)
                    {
                        if (left>0)
                            left--;
                        else if (right - best_i < explore_threshold && right<rank-1)
                            right++;
                        else break;
                    }
                    else break;
                }
                else
                {
                    if (right - best_i < explore_threshold)
                    {
                        if (right<rank-1)
                            right++;
                        else if (best_i - left < explore_threshold && left>0)
                            left--;
                        else break;
                    }
                    else break;
                }
            }
        }
    }
    return best_weight_decay;
}
#endif

real GCV(Mat X, Mat Y, real weight_decay, bool X_is_transposed, Mat* W)
{
    int n = Y.length();
    int m = Y.width();
    int p, nx;
    if (X_is_transposed)
    {
        nx=X.width();
        p=X.length();
    } else {
        nx=X.length();
        p=X.width();
    }
    if (nx!=n)
        PLERROR("GCV: incompatible arguments: X and Y don't have the same number of examples: %d and %d\n",nx,n);
    if (W && W->length()!=m)
        PLERROR("GCV: incompatible arguments: W and Y don't have compatible dimensions: %d and %d\n",W->length(),m);
    if (W && W->width()!=p)
        PLERROR("GCV: incompatible arguments: W and X don't have compatible dimensions: %d and %d\n",W->width(),p);
    static Mat Xcopy, U, Vt, Z;
    static Vec singular_values, eigen_values, squaredZ, s;
    Xcopy.resize(n,p);
    if (X_is_transposed)
        transpose(X, Xcopy);
    else
        Xcopy << X;
    int rank = min(n,p);
    U.resize(n,rank);
    Vt.resize(rank,p);
    singular_values.resize(rank);
    eigen_values.resize(rank);
    Z.resize(rank,1);
    squaredZ.resize(rank);
    s.resize(rank);
    Vec z = Z.toVec();
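    // With the thin SVD X = U diag(sigma_i) V', ridge regression shrinks the
    // component of y along the i-th left singular vector by e_i/(e_i + lambda),
    // where e_i = sigma_i^2. Writing z = U'y, the GCV criterion can then be
    // evaluated from z and the shrinkage factors s_i = lambda/(lambda + e_i)
    // alone, without refitting for each lambda; GCVfromSVD below does exactly
    // this.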

    SVD(Xcopy, U, singular_values, Vt, 'S');
    for (int i=0;i<rank;i++)
    {
        eigen_values[i] = singular_values[i]*singular_values[i];
        s[i] = weight_decay / (weight_decay + eigen_values[i]);
    }

    real sum_GCV=0;
    for (int j=0;j<m;j++)
    {
        Mat yj = Y.column(j);
        real y2 = sumsquare(yj);
        transposeProduct(Z, U, yj); // Z = U' y_j
        real z2 = pownorm(z);
        sum_GCV += GCVfromSVD(n, y2-z2, z, s);
        if (W)
        {
            for (int i=0;i<rank;i++)
                z[i] *= s[i]*singular_values[i]/weight_decay;
            transposeProduct((*W)(j),Vt,z);
        }
    }
    return sum_GCV;
}

real GCVfromSVD(real n, real Y2minusZ2, Vec Z, Vec s)
{
    int p = s.length();
    real numerator=Y2minusZ2, denominator=n-p;
    for (int i=0;i<p;i++)
    {
        real si_zi = s[i]*Z[i];
        numerator += si_zi*si_zi;
        denominator += s[i];
    }
    real GCV = n*numerator / (denominator*denominator);
    return GCV;
}

real ridgeRegressionByGCV(Mat X, Mat Y, Mat W, real& best_gcv, bool X_is_transposed,
                          real initial_weight_decay_guess, int explore_threshold, real min_weight_decay)
{
    int n = Y.length();
    int m = Y.width();
    int p, nx;
    if (X_is_transposed)
    {
        nx=X.width();
        p=X.length();
    } else {
        nx=X.length();
        p=X.width();
    }
    if (nx!=n)
        PLERROR("ridgeRegressionByGCV: incompatible arguments: X and Y don't have the same number of examples: %d and %d\n",nx,n);
    if (W.length()!=m)
        PLERROR("ridgeRegressionByGCV: incompatible arguments: W and Y don't have compatible dimensions: %d and %d\n",W.length(),m);
    if (W.width()!=p)
        PLERROR("ridgeRegressionByGCV: incompatible arguments: W and X don't have compatible dimensions: %d and %d\n",W.width(),p);
    Mat Xcopy, U, Vt, Z, squaredZ;
    Vec singular_values, eigen_values, s, y2, z2, best_s;
    Xcopy.resize(n,p);
    if (X_is_transposed)
        transpose(X, Xcopy);
    else
        Xcopy << X;
    int rank = min(n,p);
    U.resize(n,rank);
    Vt.resize(rank,p);
    singular_values.resize(rank);
    eigen_values.resize(rank);
    Z.resize(m,rank);
    squaredZ.resize(m,rank);
    s.resize(rank);
    best_s.resize(rank);
    y2.resize(m);
    z2.resize(m);
    PLASSERT( !Xcopy.hasMissing() );
    SVD(Xcopy, U, singular_values, Vt, 'S', 2);
    for (int i=0;i<rank;i++)
        eigen_values[i] = singular_values[i]*singular_values[i];

    for (int j=0;j<m;j++)
    {
        Mat Yj = Y.column(j);
        Vec Zj = Z(j);
        y2[j] = sumsquare(Yj);
        transposeProduct(Zj.toMat(rank,1),U,Yj);
        z2[j] = pownorm(Zj);
    }

    Vec gcv;
    gcv.resize(rank);
    gcv.fill(-1.);
    best_gcv = 1e38;
    real best_weight_decay = min_weight_decay;
    if (initial_weight_decay_guess<0) // TRY ALL EIGENVALUES
        // for (int i=1;i<=rank;i++)
        for (int i=1;i<rank;i++)
        {
            bool stop=false;
            real weight_decay = 0;
            /*
            if(i==rank)
                weight_decay = min_weight_decay;
            else
            */
            weight_decay = exp(0.5*(pl_log(eigen_values[i-1])+pl_log(eigen_values[i])));
            // perr << "Trying weight_decay = " << weight_decay;
            if (weight_decay < min_weight_decay)
            {
                weight_decay = min_weight_decay;
                stop = true;
            }
            for (int j=0;j<rank;j++)
                s[j] = weight_decay / (weight_decay + eigen_values[j]);
            real gcv_i = 0;
            for (int j=0;j<m;j++)
                gcv_i += GCVfromSVD(n, y2[j]-z2[j], Z(j), s);
            // perr << " -> gcv = " << gcv_i << endl;
            if (gcv_i<best_gcv)
            {
                best_gcv=gcv_i;
                best_weight_decay = weight_decay;
                best_s << s;
            }
            if (stop)
                break;
        }
    else // BE MORE GREEDY: DO A SEARCH FROM INITIAL GUESS
    {
        // first find eigenvalue closest to initial guess
        Vec weight_decays(rank+1);
        weight_decays[0] = max(min_weight_decay,eigen_values[0]);
        int stop = rank;
        for (int i=1;i<rank;i++)
        {
            if (i<stop)
            {
                weight_decays[i] = exp(0.5*(pl_log(eigen_values[i-1])+pl_log(eigen_values[i])));
                if (weight_decays[i] < min_weight_decay)
                {
                    stop = i;
                    weight_decays[i] = min_weight_decay;
                }
            }
            else weight_decays[i] = min_weight_decay;
        }
        int closest = 0;
        real eval_dist = fabs(weight_decays[0]-initial_weight_decay_guess);
        for (int i=1;i<stop;i++)
        {
            real dist = fabs(weight_decays[i]-initial_weight_decay_guess);
            if (dist < eval_dist)
            {
                eval_dist = dist;
                closest = i;
            }
        }
        // how well are we doing there?
        best_weight_decay = weight_decays[closest];

        int best_i = closest;
        for (int i=0;i<rank;i++)
            s[i] = best_weight_decay / (best_weight_decay + eigen_values[i]);
        gcv[closest] = 0;
        for (int j=0;j<m;j++)
            gcv[closest] += GCVfromSVD(n,y2[j]-z2[j], Z(j), s);
        best_gcv = gcv[closest];
        best_s << s;

        // then explore around it, first one way, then the other, until it looks like we can't get better
        int left=closest;
        int right=closest;
        if (right<stop-1)
            right++;
        else
            left--;
        while (left>=0 || right<stop-1)
        {
            bool improved = false;
            if (gcv[left]<0)
            {
                for (int i=0;i<rank;i++)
                    s[i] = weight_decays[left] / (weight_decays[left] + eigen_values[i]);
                gcv[left] = 0;
                for (int j=0;j<m;j++)
                    gcv[left] += GCVfromSVD(n,y2[j]-z2[j], Z(j), s);
                if (gcv[left]<best_gcv)
                {
                    best_gcv=gcv[left];
                    best_weight_decay = weight_decays[left];
                    best_i = left;
                    best_s << s;

                    if (left>0)
                    {
                        left--;
                        improved = true;
                    }
                }
            }
            if (gcv[right]<0)
            {
                for (int i=0;i<rank;i++)
                    s[i] = weight_decays[right] / (weight_decays[right] + eigen_values[i]);
                gcv[right] = 0;
                for (int j=0;j<m;j++)
                    gcv[right] += GCVfromSVD(n,y2[j]-z2[j], Z(j), s);
                if (gcv[right]<best_gcv)
                {
                    best_gcv=gcv[right];
                    best_weight_decay = weight_decays[right];
                    best_i = right;
                    best_s << s;

                    if (right<stop-1)
                    {
                        right++;
                        improved = true;
                    }
                }
            }
            if (!improved)
            {
                if (best_i - left < right - best_i)
                {
                    if (best_i - left < explore_threshold)
                    {
                        if (left>0)
                            left--;
                        else if (right - best_i < explore_threshold && right<stop-1)
                            right++;
                        else break;
                    }
                    else break;
                }
                else
                {
                    if (right - best_i < explore_threshold)
                    {
                        if (right<stop-1)
                            right++;
                        else if (best_i - left < explore_threshold && left>0)
                            left--;
                        else break;
                    }
                    else break;
                }
            }
        }
    }

    // compute weights for selected weight decay
    for (int j=0;j<m;j++)
    {
        Vec zj = Z(j);
        for (int i=0;i<rank;i++)
            zj[i] *= best_s[i]*singular_values[i]/best_weight_decay;
        transposeProduct(W(j),Vt,zj);
    }
    return best_weight_decay;
}
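// A minimal usage sketch for ridgeRegressionByGCV (illustrative only; the
// trailing argument values shown are arbitrary choices, not library defaults):
//
//     Mat X(n, p), Y(n, m);     // n examples, p inputs, m targets
//     // ... fill X and Y ...
//     Mat W(m, p);              // one weight row per output column
//     real best_gcv;
//     real lambda = ridgeRegressionByGCV(X, Y, W, best_gcv,
//                                        false,  // X is not transposed
//                                        -1.0,   // try all eigenvalue gaps
//                                        10,     // explore_threshold (unused when guess < 0)
//                                        1e-6);  // min_weight_decay
//     // W now holds the ridge weights for the selected weight decay lambda.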


real weightedRidgeRegressionByGCV(Mat X, Mat Y, Vec gamma, Mat W, real& best_gcv, real min_weight_decay)
{
    int l = X.length();

    real gamma_sum = 0;
    if(gamma.length()==0)
        gamma_sum = l;
    else
    {
        gamma_sum = sum(gamma);
        for(int i=0; i<l; i++)
        {
            real s = sqrt(gamma[i]);
            X(i) *= s;
            Y(i) *= s;
        }
    }

    int n = Y.length();
    int m = Y.width();
    int p = X.width();
    int nx = X.length();

    if (nx!=n)
        PLERROR("weightedRidgeRegressionByGCV: incompatible arguments: X and Y don't have the same number of examples: %d and %d\n",nx,n);
    if (W.length()!=m)
        PLERROR("weightedRidgeRegressionByGCV: incompatible arguments: W and Y don't have compatible dimensions: %d and %d\n",W.length(),m);
    if (W.width()!=p)
        PLERROR("weightedRidgeRegressionByGCV: incompatible arguments: W and X don't have compatible dimensions: %d and %d\n",W.width(),p);
    Mat U, Vt, Z, squaredZ;
    Vec singular_values, eigen_values, s, y2, z2, best_s;
    int rank = min(n,p);
    U.resize(n,rank);
    Vt.resize(rank,p);
    singular_values.resize(rank);
    eigen_values.resize(rank);
    Z.resize(m,rank);
    squaredZ.resize(m,rank);
    s.resize(rank);
    best_s.resize(rank);
    y2.resize(m);
    z2.resize(m);
    SVD(X, U, singular_values, Vt, 'S', 2);
    // perr << "Singular values: " << singular_values << endl;
    for (int i=0;i<rank;i++)
        eigen_values[i] = singular_values[i]*singular_values[i];
    // perr << "Eigen values: " << eigen_values << endl;

    for (int j=0;j<m;j++)
    {
        Mat Yj = Y.column(j);
        Vec Zj = Z(j);
        y2[j] = sumsquare(Yj);
        transposeProduct(Zj.toMat(rank,1),U,Yj);
        z2[j] = pownorm(Zj);
    }

    Vec gcv;
    gcv.resize(rank);
    gcv.fill(-1.);
    best_gcv = 1e38;
    real best_weight_decay = min_weight_decay;

    for (int i=1;i<=rank;i++)
    {
        bool stop=false;
        real weight_decay = 0;
        if(i==rank)
            weight_decay = min_weight_decay;
        else
            weight_decay = exp(0.5*(pl_log(eigen_values[i-1])+pl_log(eigen_values[i])));
        // perr << "Trying weight_decay = " << weight_decay;
        if (weight_decay < min_weight_decay)
        {
            weight_decay = min_weight_decay;
            stop = true;
        }
        for (int j=0;j<rank;j++)
            s[j] = weight_decay / (weight_decay + eigen_values[j]);
        real gcv_i = 0;
        for (int j=0;j<m;j++)
            gcv_i += GCVfromSVD(gamma_sum,y2[j]-z2[j], Z(j), s);
        // perr << " -> gcv = " << gcv_i << endl;
        if (gcv_i<best_gcv)
        {
            best_gcv=gcv_i;
            best_weight_decay = weight_decay;
            best_s << s;
        }
        if (stop)
            break;
    }

    // compute weights for selected weight decay
    for (int j=0;j<m;j++)
    {
        Vec zj = Z(j);
        for (int i=0;i<rank;i++)
            zj[i] *= best_s[i]*singular_values[i]/best_weight_decay;
        transposeProduct(W(j),Vt,zj);
    }
    return best_weight_decay;
}

#if 0
real LOOMSEofRidgeRegression(Mat inputs, Mat targets, Mat weights, real weight_decay, Vec eigenvalues, Mat eigenvectors, Mat predictions,
                             Mat RHS_matrix, bool inputs_are_transposed)
{
    int n_inputs = weights.width();
    int n_outputs = targets.width();
    int n_examples = targets.length();
    int rank = eigenvalues.length();
    // weights' = eigenvectors' * inv(diag(eigenvalues) + weight_decay*I) * eigenvectors' * inputs' * targets
    //          = eigenvectors' * inv(diag(eigenvalues) + weight_decay*I) * RHS_matrix
    weights.clear();
    real s=0;
    for (int k=0;k<rank;k++)
    {
        real* vk = eigenvectors[k];
        real* RHSk = RHS_matrix[k];
        real coeff = 1.0/(eigenvalues[k] + weight_decay);
        s += eigenvalues[k]*coeff;
        for (int i=0;i<n_outputs;i++)
        {
            real *wi = weights[i];
            for (int j=0;j<n_inputs;j++)
                wi[j] += vk[j] * coeff * RHSk[i];
        }
    }

    if (inputs_are_transposed)
        transposeTransposeProduct(predictions, inputs, weights);
    else
        productTranspose(predictions, inputs, weights);
    real SSE = 0;
    for (int i=0;i<targets.length();i++)
    {
        real *ti = targets[i];
        real *pi = predictions[i];
        for (int j=0;j<targets.width();j++)
        {
            real diff = ti[j]-pi[j];
            SSE += diff*diff;
        }
    }
    real denom = n_examples - s;
    if (denom<0)
        PLERROR("LOOMSEofRidgeRegression: mathematical error: should not get a negative trace!");
    if (denom==0) return 1e34; // some really large error...
    real LOOMSE = SSE / denom;
    return LOOMSE;
}
#endif

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :