// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 2002 Pascal Vincent

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: TMat_maths_specialisation.h 10125 2009-04-16 19:22:03Z nouiz $
 * AUTHORS: Pascal Vincent & Yoshua Bengio & Rejean Ducharme
 * This file is part of the PLearn library.
 ******************************************************* */

#ifndef TMat_maths_specialisation_INC
#define TMat_maths_specialisation_INC

#include "TMat.h"
#include <plearn/sys/Profiler.h>

namespace PLearn {
using namespace std;

//#define USE_BLAS_SPECIALISATIONS

#ifdef USE_BLAS_SPECIALISATIONS
#include "blas_proto.h"

#ifdef USEDOUBLE
#define BLAS_COPY dcopy_
#define BLAS_MULT_ACC daxpy_
#define BLAS_SCALE dscal_
#define BLAS_SWAP dswap_
#else
#define BLAS_COPY scopy_
#define BLAS_MULT_ACC saxpy_
#define BLAS_SCALE sscal_
#define BLAS_SWAP sswap_
#endif
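// The macros above pick the single- or double-precision BLAS routine at
// compile time. As a rough plain-C++ equivalent (illustrative only), a call
//     BLAS_MULT_ACC(&n, &alpha, x, &incx, y, &incy);
// performs the axpy update
//     for (int i = 0; i < n; i++)
//         y[i*incy] += alpha * x[i*incx];
// while BLAS_SCALE scales n strided elements in place and BLAS_SWAP exchanges
// two strided ranges.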
/* Commented out. It is not clear (1) whether it works, (2) where it would
 * be called, and (3) whether it is more efficient.

//////////
// copy //
//////////
inline real* copy(real* first, real* last, real* dest)
{
    int n = last - first;
    int one = 1;
    BLAS_COPY(&n, first, &one, dest, &one);
    return dest + n;
}
*/

/////////////////
// multiplyAcc //
/////////////////
inline void multiplyAcc(const Vec& vec, const Vec& x, real scale)
{
    int n = vec.length();
    PLASSERT( vec.length() == x.length() );
    int one = 1;
    BLAS_MULT_ACC(&n, &scale, x.data(), &one, vec.data(), &one);
}

inline void multiplyAcc(const Mat& mat, const Mat& x, real scale)
{
    PLASSERT( mat.length() == x.length() && mat.width() == x.width() );

    int one = 1;
    int w = mat.width();        // == x.width()
    int mod_mat = mat.mod();
    real* data_mat = mat.data();
    int mod_x = x.mod();
    real* data_x = x.data();

    if( mat.isEmpty() )         // x.isEmpty() too
        return;

    if( w == mod_mat && w == mod_x )
    {
        // The two matrices have contiguous rows, we do it in one call
        int n = mat.size();     // == x.size()
        BLAS_MULT_ACC(&n, &scale, data_x, &one, data_mat, &one);
    }
    else if( w == 1 )
    {
        // There is only one column, we do it in one call
        int l = mat.length();   // == x.length(), == mat.size()
        BLAS_MULT_ACC(&l, &scale, data_x, &mod_x, data_mat, &mod_mat);
    }
    else
    {
        // We iterate over the rows
        int l = mat.length();   // == x.length()
        for( int i=0 ; i<l ; i++, data_mat += mod_mat, data_x += mod_x )
            BLAS_MULT_ACC(&w, &scale, data_x, &one, data_mat, &one);
    }
}

/*
inline void operator+=(const TVec<double>& vec, const TVec<double>& x)
{ multiplyAcc(vec,x,1.); }
*/

////////////////
// operator*= //
////////////////
inline void operator*=(const Vec& vec, real factor)
{
    int mod = 1;
    int n = vec.length();
    if ( n != 0 )
        BLAS_SCALE(&n, &factor, vec.data(), &mod);
}

inline void operator*=(const Mat& mat, real factor)
{
    if (mat.isEmpty())
        return;

    int one = 1;
    int w = mat.width();
    int mod = mat.mod();
    real* data = mat.data();
    if (w == mod)
    {
        // The rows are contiguous, so we can do it in one call
        int n = mat.size();
        BLAS_SCALE(&n, &factor, data, &one);
    }
    else
    {
        // We iterate over the rows
        int l = mat.length();
        for( int i=0 ; i<l ; i++, data += mod )
            BLAS_SCALE(&w, &factor, data, &one);
    }
}

//////////////
// swapRows //
//////////////
inline void swapRows(const Mat& mat, int i, int j)
{
    if (i == j)
        return;
    real* mat_row_i = mat[i];
    real* mat_row_j = mat[j];
    int one = 1;
    int n = mat.width();
    BLAS_SWAP(&n, mat_row_i, &one, mat_row_j, &one);
}
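// Note on the BLAS specialisations below: Fortran BLAS routines assume
// column-major storage, whereas TMat is row-major, so BLAS sees a TMat as the
// transpose of itself. The wrappers compensate for this: the gemm wrapper
// computes C' = B'A' by handing B to BLAS before A, and the gemv wrappers
// pass an inverted 'N'/'T' transpose flag.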
/////////////////////
// productScaleAcc //
/////////////////////
// (Will use the transpose of A and/or B instead, if you set the corresponding
// flags to true)
inline void productScaleAcc(const TMat<double>& C,
                            const TMat<double>& A, bool transposeA,
                            const TMat<double>& B, bool transposeB,
                            double alpha, double beta)
{
    Profiler::pl_profile_start("productScaleAcc(dgemm) specialisation");
#ifdef BOUNDCHECK
    int l2;
#endif
    int l1, w1, w2;
    char transa, transb;
    if(transposeA)
    {
        l1 = A.width();
        w1 = A.length();
        transa = 'T';
    }
    else
    {
        l1 = A.length();
        w1 = A.width();
        transa = 'N';
    }
    if(transposeB)
    {
#ifdef BOUNDCHECK
        l2 = B.width();
#endif
        w2 = B.length();
        transb = 'T';
    }
    else
    {
#ifdef BOUNDCHECK
        l2 = B.length();
#endif
        w2 = B.width();
        transb = 'N';
    }

#ifdef BOUNDCHECK
    if (w1!=l2 || C.length()!=l1 || C.width()!=w2)
        PLERROR("productScaleAcc, incompatible arguments:\n"
                "(%dx%d) <- %s(%dx%d) . %s(%dx%d)",
                C.length(), C.width(),
                transposeA?"transpose":"", A.length(), A.width(),
                transposeB?"transpose":"", B.length(), B.width());
#endif

    int lda = A.mod();
    int ldb = B.mod();
    int ldc = C.mod();

    if (A.isEmpty() || B.isEmpty() || C.isEmpty()) {
        // Size zero: no need to bother computing anything.
        // In such a case, the result of the matrix-matrix multiplication, if
        // not empty, is necessarily zero, since R^0 = {0}.
        if (!C.isEmpty())
            C *= beta;
        return;
    }

    dgemm_(&transb, &transa, &w2, &l1, &w1, &alpha, B.data(), &ldb, A.data(),
           &lda, &beta, C.data(), &ldc);
    Profiler::pl_profile_end("productScaleAcc(dgemm) specialisation");
}
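// Usage sketch (illustrative): to compute C = alpha*A*B + beta*C on
// row-major double matrices,
//     TMat<double> C(m, n), A(m, k), B(k, n);
//     productScaleAcc(C, A, false, B, false, alpha, beta);
// Passing true for either flag uses the transpose of that argument instead.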
// (will use the transpose of A instead if transposeA is true)
inline void productScaleAcc(const TVec<double>& y,
                            const TMat<double>& A, bool transposeA,
                            const TVec<double>& x, double alpha, double beta)
{
    Profiler::pl_profile_start("productScaleAcc(dgemv_) specialisation");
#ifdef BOUNDCHECK
    if(!transposeA)
    {
        if(A.length()!=y.length() || A.width()!=x.length())
            PLERROR("productScaleAcc, incompatible arguments:\n"
                    "Vec(%d) <- Mat(%d,%d) . Vec(%d)",
                    y.length(), A.length(), A.width(), x.length());
    }
    else
    {
        if(A.length()!=x.length() || A.width()!=y.length())
            PLERROR("productScaleAcc, incompatible arguments:\n"
                    "Vec(%d) <- Mat(%d,%d)' . Vec(%d)",
                    y.length(), A.length(), A.width(), x.length());
    }
#endif

    int one = 1;
    char trans = transposeA ? 'N' : 'T';
    int lda = A.mod();
    int m = A.width();
    int n = A.length();

    if (A.isEmpty() || x.isEmpty() || y.isEmpty()) {
        // Size zero: no need to bother computing anything.
        // In such a case, the result of the matrix-vector multiplication, if
        // not empty, is necessarily zero, since R^0 = {0}.
        if (!y.isEmpty())
            y *= beta;
        return;
    }

    dgemv_(&trans, &m, &n, &alpha, A.data(), &lda, x.data(), &one, &beta,
           y.data(), &one);
    Profiler::pl_profile_end("productScaleAcc(dgemv_) specialisation");
}

inline void externalProductScaleAcc(const TMat<double>& A,
                                    const TVec<double>& x,
                                    const TVec<double>& y, double alpha)
{
    Profiler::pl_profile_start("externalProductScaleAcc(dger_) double specialisation");

#ifdef BOUNDCHECK
    if(A.length()!=x.length() || A.width()!=y.length())
        PLERROR("In externalProductScaleAcc, incompatible dimensions:\n"
                "Mat(%d,%d) <- Vec(%d).Vec(%d)'",
                A.length(), A.width(), x.length(), y.length());
    if(A.mod()<=0 || A.width()<=0)
        PLERROR("In externalProductScaleAcc, destination matrix has a width "
                "(%d) or a mod (%d) <= 0", A.width(), A.mod());
#endif
    int one = 1;
    int lda = A.mod();
    int m = A.width();
    int n = A.length();

    if (A.isNull() || x.isNull() || y.isNull()  // Size zero; don't bother
        || m == 0 || n == 0)                    // with the actual calculation
        return;

    dger_(&m, &n, &alpha, y.data(), &one, x.data(), &one, A.data(), &lda);
    Profiler::pl_profile_end("externalProductScaleAcc(dger_) double specialisation");
}
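// externalProductScaleAcc accumulates a scaled outer product, A += alpha*x*y',
// i.e. roughly (illustrative only):
//     for (int i = 0; i < A.length(); i++)
//         for (int j = 0; j < A.width(); j++)
//             A(i, j) += alpha * x[i] * y[j];
// The dger_ call above passes y before x for the same row-major reason as in
// the gemm wrapper.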
inline void externalProductAcc(const TMat<double>& A,
                               const TVec<double>& x,
                               const TVec<double>& y)
{ externalProductScaleAcc(A, x, y, 1.); }

inline void product(const TVec<double>& vec, const TMat<double>& m,
                    const TVec<double>& v)
{ productScaleAcc(vec, m, false, v, 1., 0.); }

inline void productAcc(const TVec<double>& vec, const TMat<double>& m,
                       const TVec<double>& v)
{ productScaleAcc(vec, m, false, v, 1., 1.); }

inline void productScaleAcc(const TVec<double>& vec, const TMat<double>& m,
                            const TVec<double>& v, double alpha, double beta)
{ productScaleAcc(vec, m, false, v, alpha, beta); }

inline void transposeProduct(const TVec<double>& vec, const TMat<double>& m,
                             const TVec<double>& v)
{ productScaleAcc(vec, m, true, v, 1., 0.); }

inline void transposeProductAcc(const TVec<double>& vec, const TMat<double>& m,
                                const TVec<double>& v)
{ productScaleAcc(vec, m, true, v, 1., 1.); }

inline void transposeProductScaleAcc(const TVec<double>& vec,
                                     const TMat<double>& m,
                                     const TVec<double>& v,
                                     double alpha, double beta)
{ productScaleAcc(vec, m, true, v, alpha, beta); }

inline void product(const TMat<double>& mat, const TMat<double>& m1,
                    const TMat<double>& m2)
{ productScaleAcc(mat, m1, false, m2, false, 1., 0.); }

inline void transposeTransposeProduct(const TMat<double>& mat,
                                      const TMat<double>& m1,
                                      const TMat<double>& m2)
{ productScaleAcc(mat, m1, true, m2, true, 1., 0.); }

inline void transposeProduct(const TMat<double>& mat, const TMat<double>& m1,
                             const TMat<double>& m2)
{ productScaleAcc(mat, m1, true, m2, false, 1., 0.); }

inline void productTranspose(const TMat<double>& mat, const TMat<double>& m1,
                             const TMat<double>& m2)
{ productScaleAcc(mat, m1, false, m2, true, 1., 0.); }

inline void productAcc(const TMat<double>& mat, const TMat<double>& m1,
                       const TMat<double>& m2)
{ productScaleAcc(mat, m1, false, m2, false, 1., 1.); }

inline void productScaleAcc(const TMat<double>& mat,
                            const TMat<double>& m1, const TMat<double>& m2,
                            double alpha, double beta)
{ productScaleAcc(mat, m1, false, m2, false, alpha, beta); }

inline void transposeTransposeProductAcc(const TMat<double>& mat,
                                         const TMat<double>& m1,
                                         const TMat<double>& m2)
{ productScaleAcc(mat, m1, true, m2, true, 1., 1.); }

inline void transposeTransposeProductScaleAcc(const TMat<double>& mat,
                                              const TMat<double>& m1,
                                              const TMat<double>& m2,
                                              double alpha, double beta)
{ productScaleAcc(mat, m1, true, m2, true, alpha, beta); }

inline void transposeProductAcc(const TMat<double>& mat,
                                const TMat<double>& m1,
                                const TMat<double>& m2)
{ productScaleAcc(mat, m1, true, m2, false, 1., 1.); }

inline void transposeProductScaleAcc(const TMat<double>& mat,
                                     const TMat<double>& m1,
                                     const TMat<double>& m2,
                                     double alpha, double beta)
{ productScaleAcc(mat, m1, true, m2, false, alpha, beta); }

inline void productTransposeAcc(const TMat<double>& mat,
                                const TMat<double>& m1,
                                const TMat<double>& m2)
{ productScaleAcc(mat, m1, false, m2, true, 1., 1.); }

inline void productTransposeScaleAcc(const TMat<double>& mat,
                                     const TMat<double>& m1,
                                     const TMat<double>& m2,
                                     double alpha, double beta)
{ productScaleAcc(mat, m1, false, m2, true, alpha, beta); }
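// Naming scheme of the wrappers above (and of the float ones below): a
// leading 'transpose' transposes the first matrix argument and a 'Transpose'
// later in the name transposes the second; 'Acc' accumulates into the
// destination (beta = 1) instead of overwriting it (beta = 0); 'ScaleAcc'
// exposes alpha and beta explicitly.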
// float

// (Will use the transpose of A and/or B instead, if you set the corresponding
// flags to true)
inline void productScaleAcc(const TMat<float>& C,
                            const TMat<float>& A, bool transposeA,
                            const TMat<float>& B, bool transposeB,
                            float alpha, float beta)
{
    Profiler::pl_profile_start("productScaleAcc(sgemm) specialisation");

#ifdef BOUNDCHECK
    int l2;
#endif
    int l1, w1, w2;
    char transa, transb;
    if(transposeA)
    {
        l1 = A.width();
        w1 = A.length();
        transa = 'T';
    }
    else
    {
        l1 = A.length();
        w1 = A.width();
        transa = 'N';
    }
    if(transposeB)
    {
#ifdef BOUNDCHECK
        l2 = B.width();
#endif
        w2 = B.length();
        transb = 'T';
    }
    else
    {
#ifdef BOUNDCHECK
        l2 = B.length();
#endif
        w2 = B.width();
        transb = 'N';
    }

#ifdef BOUNDCHECK
    if (w1!=l2 || C.length()!=l1 || C.width()!=w2)
        PLERROR("productScaleAcc, incompatible arguments:\n"
                "(%dx%d) <- %s(%dx%d) . %s(%dx%d)",
                C.length(), C.width(),
                transposeA?"transpose":"", A.length(), A.width(),
                transposeB?"transpose":"", B.length(), B.width());
#endif

    int lda = A.mod();
    int ldb = B.mod();
    int ldc = C.mod();

    if (A.isNull() || B.isNull() || C.isNull())  // Size zero; don't bother
        return;                                  // with the actual calculation

    sgemm_(&transb, &transa, &w2, &l1, &w1, &alpha, B.data(), &ldb, A.data(),
           &lda, &beta, C.data(), &ldc);
    Profiler::pl_profile_end("productScaleAcc(sgemm) specialisation");
}

// (will use the transpose of A instead if transposeA is true)
inline void productScaleAcc(const TVec<float>& y,
                            const TMat<float>& A, bool transposeA,
                            const TVec<float>& x, float alpha, float beta)
{
    Profiler::pl_profile_start("productScaleAcc(sgemv_) specialisation");
#ifdef BOUNDCHECK
    if(!transposeA)
    {
        if(A.length()!=y.length() || A.width()!=x.length())
            PLERROR("productScaleAcc, incompatible arguments:\n"
                    "Vec(%d) <- Mat(%d,%d) . Vec(%d)",
                    y.length(), A.length(), A.width(), x.length());
    }
    else
    {
        if(A.length()!=x.length() || A.width()!=y.length())
            PLERROR("productScaleAcc, incompatible arguments:\n"
                    "Vec(%d) <- Mat(%d,%d)' . Vec(%d)",
                    y.length(), A.length(), A.width(), x.length());
    }
#endif

    int one = 1;
    char trans = transposeA ? 'N' : 'T';
    int lda = A.mod();
    int m = A.width();
    int n = A.length();

    sgemv_(&trans, &m, &n, &alpha, A.data(), &lda, x.data(), &one, &beta,
           y.data(), &one);
    Profiler::pl_profile_end("productScaleAcc(sgemv_) specialisation");
}

inline void externalProductScaleAcc(const TMat<float>& A, const TVec<float>& x,
                                    const TVec<float>& y, float alpha)
{
    Profiler::pl_profile_start("externalProductScaleAcc(sger_) float specialisation");
#ifdef BOUNDCHECK
    if(A.length()!=x.length() || A.width()!=y.length())
        PLERROR("In externalProductScaleAcc, incompatible dimensions:\n"
                "Mat(%d,%d) <- Vec(%d).Vec(%d)'",
                A.length(), A.width(), x.length(), y.length());
#endif
    int one = 1;
    int lda = A.mod();
    int m = A.width();
    int n = A.length();

    if (A.isNull() || x.isNull() || y.isNull())  // Size zero; don't bother
        return;                                  // with the actual calculation

    sger_(&m, &n, &alpha, y.data(), &one, x.data(), &one, A.data(), &lda);
    Profiler::pl_profile_end("externalProductScaleAcc(sger_) float specialisation");
}

inline void externalProductAcc(const TMat<float>& A, const TVec<float>& x,
                               const TVec<float>& y)
{ externalProductScaleAcc(A, x, y, 1.); }

inline void product(const TVec<float>& vec, const TMat<float>& m,
                    const TVec<float>& v)
{ productScaleAcc(vec, m, false, v, 1., 0.); }

inline void productAcc(const TVec<float>& vec, const TMat<float>& m,
                       const TVec<float>& v)
{ productScaleAcc(vec, m, false, v, 1., 1.); }

inline void productScaleAcc(const TVec<float>& vec, const TMat<float>& m,
                            const TVec<float>& v, float alpha, float beta)
{ productScaleAcc(vec, m, false, v, alpha, beta); }

inline void transposeProduct(const TVec<float>& vec, const TMat<float>& m,
                             const TVec<float>& v)
{ productScaleAcc(vec, m, true, v, 1., 0.); }

inline void transposeProductAcc(const TVec<float>& vec, const TMat<float>& m,
                                const TVec<float>& v)
{ productScaleAcc(vec, m, true, v, 1., 1.); }

inline void transposeProductScaleAcc(const TVec<float>& vec,
                                     const TMat<float>& m,
                                     const TVec<float>& v,
                                     float alpha, float beta)
{ productScaleAcc(vec, m, true, v, alpha, beta); }

inline void product(const TMat<float>& mat, const TMat<float>& m1,
                    const TMat<float>& m2)
{ productScaleAcc(mat, m1, false, m2, false, 1., 0.); }

inline void transposeTransposeProduct(const TMat<float>& mat,
                                      const TMat<float>& m1,
                                      const TMat<float>& m2)
{ productScaleAcc(mat, m1, true, m2, true, 1., 0.); }

inline void transposeProduct(const TMat<float>& mat, const TMat<float>& m1,
                             const TMat<float>& m2)
{ productScaleAcc(mat, m1, true, m2, false, 1., 0.); }

inline void productTranspose(const TMat<float>& mat, const TMat<float>& m1,
                             const TMat<float>& m2)
{ productScaleAcc(mat, m1, false, m2, true, 1., 0.); }

inline void productAcc(const TMat<float>& mat, const TMat<float>& m1,
                       const TMat<float>& m2)
{ productScaleAcc(mat, m1, false, m2, false, 1., 1.); }

inline void productScaleAcc(const TMat<float>& mat,
                            const TMat<float>& m1, const TMat<float>& m2,
                            float alpha, float beta)
{ productScaleAcc(mat, m1, false, m2, false, alpha, beta); }

inline void transposeTransposeProductAcc(const TMat<float>& mat,
                                         const TMat<float>& m1,
                                         const TMat<float>& m2)
{ productScaleAcc(mat, m1, true, m2, true, 1., 1.); }

inline void transposeTransposeProductScaleAcc(const TMat<float>& mat,
                                              const TMat<float>& m1,
                                              const TMat<float>& m2,
                                              float alpha, float beta)
{ productScaleAcc(mat, m1, true, m2, true, alpha, beta); }

inline void transposeProductAcc(const TMat<float>& mat, const TMat<float>& m1,
                                const TMat<float>& m2)
{ productScaleAcc(mat, m1, true, m2, false, 1., 1.); }

inline void transposeProductScaleAcc(const TMat<float>& mat,
                                     const TMat<float>& m1,
                                     const TMat<float>& m2,
                                     float alpha, float beta)
{ productScaleAcc(mat, m1, true, m2, false, alpha, beta); }

inline void productTransposeAcc(const TMat<float>& mat, const TMat<float>& m1,
                                const TMat<float>& m2)
{ productScaleAcc(mat, m1, false, m2, true, 1., 1.); }

inline void productTransposeScaleAcc(const TMat<float>& mat,
                                     const TMat<float>& m1,
                                     const TMat<float>& m2,
                                     float alpha, float beta)
{ productScaleAcc(mat, m1, false, m2, true, alpha, beta); }


#endif // USE_BLAS_SPECIALISATIONS

// strange little functions of Yoshua for optimized computations in neural nets

#define UNFOLD

// dot product, assumes that s is already initialized
// return s + sum_{i=0}^{n-1} x[i]*y[i]
inline real dot_product(real s, real* x, real* y, int n)
{
#ifdef UNFOLD
    int n4 = (n >> 2) << 2;
    int i=0;
    for (;i<n4;i+=4)
    {
        real s1 = x[i] * y[i];
        real s2 = x[i+1] * y[i+1];
        real s3 = x[i+2] * y[i+2];
        real s4 = x[i+3] * y[i+3];
        s += s1+s2+s3+s4;
    }
    for (;i<n;i++)
        s += x[i] * y[i];
#else
    for (int i=0;i<n;i++)
        s += *x++ * *y++;
#endif
    return s;
}
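// The UNFOLD branch of dot_product computes four independent partial products
// per iteration before summing them into s, which shortens the sequential
// dependence chain on s and gives the compiler more room to schedule or
// vectorise; the trailing loop handles the n % 4 leftover elements.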
// norman: SSE is not supported in WIN32
#if defined(SGI) && !defined(WIN32)
#include <plearn/sys/sse.h>
#endif // defined(SGI) && !defined(WIN32)
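// Reference sketch (illustrative, equivalent to the plain #else branch of
// bprop_update_layer below): backprop through a fully-connected layer with
// n_y outputs, n_x inputs and a row-major n_y x n_x weight matrix w, with
// weight decay folded into the update:
//     for (int i = 0; i < n_y; i++)
//         for (int j = 0; j < n_x; j++) {
//             dx[j]      += dy[i] * w[i*n_x+j];
//             w[i*n_x+j] -= learning_rate * (dy[i]*x[j]
//                                            + weight_decay*w[i*n_x+j]);
//         }
// The UNFOLD and BUNDLE variants below only reorganise these loops for speed.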
//#define BUNDLE
// dx[j] += sum_i dy[i]*w(i,j)
// w(i,j) -= learning_rate*(dy[i]*x[j] + weight_decay*w(i,j))
inline void bprop_update_layer(real* dy, real* x, real* dx, real* w,
                               int n_y, int n_x, real learning_rate,
                               real weight_decay)
{
#ifdef BUNDLE
    int nx8 = (n_x >> 3) << 3;
    int j8=0;
    real* xj = x;
    real* dxj = dx;
    int delta_w1 = n_x - 8;
    int delta_w2 = n_y*n_x - 8;
    real* w_ij = w;
    for (;j8<nx8;j8+=8,xj+=8,dxj+=8,w_ij-=delta_w2)
    {
        real* dy_ = dy;
        for (int i=0;i<n_y;i++)
        {
            real* next_w = w_ij + delta_w1;
            prefetchnta(*next_w);
            real* x_j = xj;
            real* dx_j = dxj;
            real d_y = dy_[i];
            *dx_j += d_y * *w_ij;
            *w_ij -= learning_rate*(d_y * *x_j + weight_decay * *w_ij);
            dx_j[1] += d_y * w_ij[1];
            w_ij[1] -= learning_rate*(d_y * x_j[1] + weight_decay * w_ij[1]);
            dx_j[2] += d_y * w_ij[2];
            w_ij[2] -= learning_rate*(d_y * x_j[2] + weight_decay * w_ij[2]);
            dx_j[3] += d_y * w_ij[3];
            w_ij[3] -= learning_rate*(d_y * x_j[3] + weight_decay * w_ij[3]);
            dx_j[4] += d_y * w_ij[4];
            w_ij[4] -= learning_rate*(d_y * x_j[4] + weight_decay * w_ij[4]);
            dx_j[5] += d_y * w_ij[5];
            w_ij[5] -= learning_rate*(d_y * x_j[5] + weight_decay * w_ij[5]);
            dx_j[6] += d_y * w_ij[6];
            w_ij[6] -= learning_rate*(d_y * x_j[6] + weight_decay * w_ij[6]);
            dx_j[7] += d_y * w_ij[7];
            w_ij[7] -= learning_rate*(d_y * x_j[7] + weight_decay * w_ij[7]);
            w_ij = next_w;
        }
    }
    // Leftover columns (j8 .. n_x-1): index from the start of each row so the
    // reads and writes line up with the columns actually left over (the
    // original leftover loop restarted dx, x and w from their beginnings).
    for (int i=0;i<n_y;i++)
    {
        real dy_i = dy[i];
        real* w_i = w + i*n_x;
        for (int j=j8;j<n_x;j++)
        {
            real w_ij = w_i[j];
            dx[j] += dy_i * w_ij;
            w_i[j] -= learning_rate*(dy_i * x[j] + weight_decay * w_ij);
        }
    }

#else
#ifdef UNFOLD
    int nx4 = (n_x >> 2) << 2;
    real *w_i = w;
    for (int i=0;i<n_y;i++,w_i+=n_x)
    {
        real dy_i = dy[i];
        real *dx_j = dx;
        real *x_j = x;
        int j=0;
        for (;j<nx4;j+=4)
        {
            real w_ij0 = w_i[j];
            real w_ij1 = w_i[j+1];
            real w_ij2 = w_i[j+2];
            real w_ij3 = w_i[j+3];
            dx_j[j] += dy_i * w_ij0;
            dx_j[j+1] += dy_i * w_ij1;
            dx_j[j+2] += dy_i * w_ij2;
            dx_j[j+3] += dy_i * w_ij3;
            w_i[j] -= learning_rate*(dy_i * x_j[j] + weight_decay * w_ij0);
            w_i[j+1] -= learning_rate*(dy_i * x_j[j+1] + weight_decay * w_ij1);
            w_i[j+2] -= learning_rate*(dy_i * x_j[j+2] + weight_decay * w_ij2);
            w_i[j+3] -= learning_rate*(dy_i * x_j[j+3] + weight_decay * w_ij3);
        }
        for (;j<n_x;j++)
        {
            real w_ij = w_i[j];
            dx_j[j] += dy_i * w_ij;
            w_i[j] -= learning_rate*(dy_i * x_j[j] + weight_decay * w_ij);
        }
    }
#else
    for (int i=0;i<n_y;i++)
    {
        real dy_i = dy[i];
        real *dx_j = dx;
        real *x_j = x;
        for (int j=0;j<n_x;j++)
        {
            // Read *w once before updating it: the original expression
            // '*w++ -= ...(... * *w)' both modified and read the pointer w
            // without an intervening sequence point, which is undefined
            // behaviour.
            real w_ij = *w;
            *dx_j++ += dy_i * w_ij;
            *w++ -= learning_rate*(dy_i * *x_j++ + weight_decay * w_ij);
        }
    }
#endif
#endif
}

} // end of namespace PLearn

#endif // TMat_maths_specialisation_INC


/*
Local Variables:
mode:c++
c-basic-offset:4
c-file-style:"stroustrup"
c-file-offsets:((innamespace . 0)(inline-open . 0))
indent-tabs-mode:nil
fill-column:79
End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :