// -*- C++ -*-

// VMat_linalg.cc
//
// Copyright (C) 2004 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: VMat_linalg.cc 9648 2008-11-05 21:52:35Z ducharme $
 ******************************************************* */

// Authors: Pascal Vincent

// From PLearn
#include <plearn/base/Object.h>
#include <plearn/base/ProgressBar.h>
#include "VMat_linalg.h"
#include <plearn/math/TMat_maths.h>
#include "VMat.h"
#include "ExtendedVMatrix.h"
#include <plearn/math/plapack.h>

namespace PLearn {
using namespace std;

Mat transposeProduct(VMat m)
{
    Mat result(m.width(),m.width());
    result.clear(); // make sure accumulation starts from zero

    Vec v(m.width());
    Mat vrowmat = rowmatrix(v);

    for(int i=0; i<m.length(); i++)
    {
        m->getRow(i,v);
        transposeProductAcc(result, vrowmat,vrowmat);
    }
    return result;
}
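
// Usage sketch (an illustrative addition, not part of the original file):
// wrap a small in-memory Mat as a VMat and accumulate its Gram matrix X'X
// one row at a time. The VMAT_LINALG_EXAMPLES guard is hypothetical, so the
// sketch is not compiled by default.
#ifdef VMAT_LINALG_EXAMPLES
static Mat exampleTransposeProduct()
{
    Mat x(3, 2);
    x(0,0) = 1; x(0,1) = 2;
    x(1,0) = 3; x(1,1) = 4;
    x(2,0) = 5; x(2,1) = 6;
    VMat vm(x);                  // an in-memory Mat converts to a VMat
    return transposeProduct(vm); // 2x2 result equal to x' * x
}
#endif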

Mat transposeProduct(VMat m1, VMat m2)
{
    if(m1.length()!=m2.length())
        PLERROR("in Mat transposeProduct(VMat m1, VMat m2) arguments have incompatible dimensions");

    Mat result(m1.width(),m2.width());
    result.clear(); // make sure accumulation starts from zero

    Vec v1(m1.width());
    Vec v2(m2.width());
    Mat v1rowmat = rowmatrix(v1);
    Mat v2rowmat = rowmatrix(v2);

    for(int i=0; i<m1.length(); i++)
    {
        m1->getRow(i,v1);
        m2->getRow(i,v2);
        transposeProductAcc(result, v1rowmat,v2rowmat);
    }
    return result;
}

Vec transposeProduct(VMat m1, Vec v2)
{
    if(m1.length()!=v2.length())
        PLERROR("in Vec transposeProduct(VMat m1, Vec v2) arguments have incompatible dimensions");

    Vec result(m1.width());
    result.clear();

    Vec v1(m1.width());
    for(int i=0; i<m1.length(); i++)
    {
        m1->getRow(i,v1);
        result += v1 * v2[i];
    }
    return result;
}

Mat productTranspose(VMat m1, VMat m2)
{
    if(m1.width()!=m2.width())
        PLERROR("in Mat productTranspose(VMat m1, VMat m2) arguments have incompatible dimensions");

    int m1l = m1.length();
    int m2l = m2.length();
    int w = m1.width();
    Mat result(m1l,m2l);

    Vec v1(w);
    Vec v2(w);

    for(int i=0; i<m1l; i++)
    {
        m1->getRow(i,v1);
        for(int j=0; j<m2l; j++)
        {
            m2->getRow(j,v2);
            result(i,j) = dot(v1,v2);
        }
    }
    return result;
}
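
// Usage sketch (an illustrative addition, not part of the original file):
// productTranspose yields the m1.length() x m2.length() matrix of dot
// products between rows of two datasets of equal width, i.e. a linear kernel
// (Gram) matrix. The VMAT_LINALG_EXAMPLES guard is hypothetical.
#ifdef VMAT_LINALG_EXAMPLES
static Mat exampleLinearKernel(VMat train, VMat test)
{
    // K(i,j) = dot(row i of train, row j of test)
    return productTranspose(train, test);
}
#endif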

Mat product(Mat m1, VMat m2)
{
    if(m1.width()!=m2.length())
        PLERROR("in Mat product(Mat m1, VMat m2) arguments have incompatible dimensions");

    Mat result(m1.length(),m2.width());
    result.clear();

    Vec v2(m2.width());
    Mat v2rowmat = rowmatrix(v2);

    for(int i=0; i<m1.width(); i++)
    {
        m2->getRow(i,v2);
        productAcc(result, m1.column(i), v2rowmat);
    }
    return result;
}

VMat transpose(VMat m1)
{
    return VMat(transpose(m1.toMat()));
}

real linearRegression(
    VMat inputs, VMat outputs, real weight_decay, Mat theta_t,
    bool use_precomputed_XtX_XtY, Mat XtX, Mat XtY,
    real& sum_squared_Y, Vec& outputwise_sum_squared_Y,
    bool return_squared_loss, int verbose_every, bool cholesky,
    int apply_decay_from)
{
    if (outputs.length()!=inputs.length())
        PLERROR("linearRegression: inputs.length()=%d while outputs.length()=%d",inputs.length(),outputs.length());
    if (theta_t.length()!=inputs.width() || theta_t.width()!=outputs.width())
        PLERROR("linearRegression: theta_t(%d,%d) should be (%dx%d)",
                theta_t.length(),theta_t.width(),inputs.width(),outputs.width());

    int inputsize = inputs.width();
    int targetsize = outputs.width();

    if(XtX.length()!=inputsize || XtX.width()!=inputsize)
        PLERROR("In linearRegression: XtX should have dimensions %dx%d (inputs.width())x(inputs.width())",
                inputsize,inputsize);
    if(XtY.length()!=inputsize || XtY.width()!=targetsize)
        PLERROR("In linearRegression: XtY should have dimensions %dx%d (inputs.width())x(outputs.width())",
                inputsize,targetsize);

    if(!use_precomputed_XtX_XtY) // then compute them
    {
        VMat X = inputs; // new ExtendedVMatrix(inputs,0,0,1,0,1.0); // prepend a first column of ones
        VMat Y = outputs;
        outputwise_sum_squared_Y.resize(targetsize);
        outputwise_sum_squared_Y.fill(0.0);

        // *************
        // Do efficiently the following:
        // XtX << transposeProduct(X); // '<<' to copy elements (as transposeProduct returns a new matrix)
        // XtY << transposeProduct(X,Y); // same thing (remember '=' for Mat never copies elements)
        XtX.clear();
        XtY.clear();
        sum_squared_Y=0;
        Vec x(X.width());
        Vec y(Y.width());
        int l=X.length();

        // Display progress bar iff we have some verbosity
        PP<ProgressBar> pb(
            verbose_every?
            new ProgressBar("Performing Unweighted Linear Regression", l) : 0);

        for(int i=0; i<l; i++)
        {
            if (pb)
                pb->update(i);

            X->getRow(i,x);
            Y->getRow(i,y);
            externalProductAcc(XtX, x,x);
            externalProductAcc(XtY, x,y);
            sum_squared_Y += dot(y,y);
            y *= y; // elementwise square, for the outputwise sums
            outputwise_sum_squared_Y += y;
        }
        // *************
    }

    // add weight_decay on the diagonal of X'X (except for the bias)
    for (int i=apply_decay_from; i<XtX.length(); i++)
        XtX(i,i) += weight_decay;

    // VMat(XtX)->savePMAT("plXtX.pmat");
    // VMat(XtY)->savePMAT("plXtY.pmat");

    if (cholesky) {
        // now solve by Cholesky decomposition
        solveLinearSystemByCholesky(XtX,XtY,theta_t);
    } else {
        theta_t = solveLinearSystem(XtX, XtY);
    }

    real squared_loss=0;
    if (return_squared_loss)
    {
        // squared loss ||Y - X theta'||^2 = sum_{ij} theta_{ij} (X'X theta')_{ij}
        //   + sum_{t,i} Y_{ti}^2 - 2 sum_{ij} theta_{ij} (X'Y)_{ij}
        // (note: XtX already includes the weight decay on its diagonal at
        // this point, so the ridge penalty is counted as well)
        Mat M(inputsize,targetsize);
        product(M,XtX,theta_t);
        squared_loss += dot(M,theta_t);
        squared_loss += sum_squared_Y;
        squared_loss -= 2*dot(XtY,theta_t);
    }
    return squared_loss/inputs.length();
}

Mat linearRegression(VMat inputs, VMat outputs, real weight_decay, bool include_bias)
{
    int n = inputs.width()+(include_bias?1:0);
    int n_outputs = outputs.width();
    Mat XtX(n,n);
    Mat XtY(n,n_outputs);
    Mat theta_t(n,n_outputs);
    real sy=0;
    Vec outputwise_sum_squared_Y;
    if(include_bias)
        inputs = new ExtendedVMatrix(inputs,0,0,1,0,1.0); // prepend a first column of ones
    linearRegression(inputs, outputs, weight_decay, theta_t,
                     false, XtX, XtY, sy, outputwise_sum_squared_Y);
    return theta_t;
}
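
// Usage sketch (an illustrative addition, not part of the original file):
// fit a ridge regression with a bias term, then form the fitted values
// Y_hat = [1 X] theta'. With include_bias=true, theta_t has
// inputs.width()+1 rows, the first of which multiplies the prepended column
// of ones. The VMAT_LINALG_EXAMPLES guard is hypothetical.
#ifdef VMAT_LINALG_EXAMPLES
static Mat exampleRidgePredictions(VMat inputs, VMat outputs, real lambda)
{
    Mat theta_t = linearRegression(inputs, outputs, lambda, true);
    VMat extended = new ExtendedVMatrix(inputs,0,0,1,0,1.0); // prepend ones
    return product(extended.toMat(), VMat(theta_t));         // length x n_outputs
}
#endif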


real weightedLinearRegression(
    VMat inputs, VMat outputs, VMat gammas, real weight_decay, Mat theta_t,
    bool use_precomputed_XtX_XtY, Mat XtX, Mat XtY,
    real& sum_squared_Y, Vec& outputwise_sum_squared_Y,
    real& sum_gammas, bool return_squared_loss, int verbose_every,
    bool cholesky, int apply_decay_from)
{
    int inputsize = inputs.width();
    int targetsize = outputs.width();
    if (outputs.length()!=inputs.length())
        PLERROR("weightedLinearRegression: inputs.length()=%d while outputs.length()=%d",inputs.length(),outputs.length());
    if (theta_t.length()!=inputsize || theta_t.width()!=targetsize)
        PLERROR("weightedLinearRegression: theta_t(%d,%d) should be (%dx%d)",
                theta_t.length(),theta_t.width(),inputsize,targetsize);

    if(XtX.length()!=inputsize || XtX.width()!=inputsize)
        PLERROR("In weightedLinearRegression: XtX should have dimensions %dx%d (inputs.width())x(inputs.width())",
                inputsize,inputsize);
    if(XtY.length()!=inputsize || XtY.width()!=targetsize)
        PLERROR("In weightedLinearRegression: XtY should have dimensions %dx%d (inputs.width())x(outputs.width())",
                inputsize,targetsize);

    int l=inputs.length();
    if(!use_precomputed_XtX_XtY) // then compute them
    {
        XtX.clear();
        XtY.clear();
        // VMat X = new ExtendedVMatrix(inputs,0,0,1,0,1.0); // prepend a first column of ones
        VMat X = inputs;
        VMat Y = outputs;
        outputwise_sum_squared_Y.resize(targetsize);
        outputwise_sum_squared_Y.fill(0.0);

        sum_squared_Y= 0.0;
        sum_gammas= 0.0;

        // Prepare to compute weighted XtX and XtY
        Vec x(X.width());
        Vec y(Y.width());
        real gamma_i;

        // Display progress bar iff we have some verbosity
        PP<ProgressBar> pb(
            verbose_every?
            new ProgressBar("Performing Weighted Linear Regression", l) : 0);

        for(int i=0; i<l; i++)
        {
            if (pb)
                pb->update(i);

            X->getRow(i,x);
            Y->getRow(i,y);
            gamma_i = gammas(i,0);
            externalProductScaleAcc(XtX, x,x,gamma_i);
            externalProductScaleAcc(XtY, x,y,gamma_i);
            sum_squared_Y += gamma_i * dot(y,y);
            sum_gammas += gamma_i;
            y *= gamma_i*y; // elementwise: y := gamma_i * y^2
            outputwise_sum_squared_Y += y;
        }
    }

    // add weight_decay on the diagonal of X'X (except for the bias)
    for (int i=apply_decay_from; i<XtX.length(); i++)
        XtX(i,i) += weight_decay;

    if (cholesky) {
        // now solve by Cholesky decomposition
        solveLinearSystemByCholesky(XtX,XtY,theta_t);
    } else {
        theta_t = solveLinearSystem(XtX, XtY);
    }

    real squared_loss=0;
    if (return_squared_loss)
    {
        // squared loss = sum_{ij} theta_{ij} (X'W X theta')_{ij}
        //   + sum_{t,i} gamma_t*Y_{ti}^2 - 2 sum_{ij} theta_{ij} (X'W Y)_{ij},
        // where W = diag(gammas); XtX already includes the weight decay.
        Mat M(inputsize,targetsize);
        product(M,XtX,theta_t);
        squared_loss += dot(M,theta_t);
        squared_loss += sum_squared_Y;
        squared_loss -= 2*dot(XtY,theta_t);
    }
    // return squared_loss/l;
    // perr << "linreg/l: " << squared_loss << "/" << l << "=" << squared_loss/l << endl;
    // perr << "linreg/sg: " << squared_loss << "/" << sum_gammas << "=" << squared_loss/sum_gammas << endl;
    return squared_loss/sum_gammas;
}

Mat weightedLinearRegression(VMat inputs, VMat outputs, VMat gammas,
                             real weight_decay, bool include_bias)
{
    int n = inputs.width()+(include_bias?1:0);
    int n_outputs = outputs.width();
    Mat XtX(n,n);
    Mat XtY(n,n_outputs);
    Mat theta_t(n,n_outputs);
    real sy=0;
    real sg=0;
    Vec outputwise_sum_squared_Y;
    if(include_bias)
        inputs = new ExtendedVMatrix(inputs,0,0,1,0,1.0); // prepend a first column of ones
    weightedLinearRegression(inputs, outputs, gammas, weight_decay, theta_t,
                             false, XtX, XtY, sy, outputwise_sum_squared_Y,
                             sg);
    return theta_t;
}
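
// Usage sketch (an illustrative addition, not part of the original file): a
// weighted ridge fit where row t of 'gammas' (a length x 1 VMat) weights
// sample t, i.e. theta' solves X'GX theta' = X'GY with G = diag(gammas),
// plus the weight decay on the non-bias diagonal. The VMAT_LINALG_EXAMPLES
// guard is hypothetical.
#ifdef VMAT_LINALG_EXAMPLES
static Mat exampleWeightedRidgeFit(VMat inputs, VMat outputs, VMat gammas,
                                   real lambda)
{
    return weightedLinearRegression(inputs, outputs, gammas, lambda, true);
}
#endif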


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :