ProjectionErrorVariable.cc
// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: ProjectionErrorVariable.cc 8773 2008-04-08 19:36:07Z saintmlx $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "ProjectionErrorVariable.h"
#include "Var_operators.h"
#include <plearn/math/plapack.h>
//#include "Var_utils.h"

namespace PLearn {
using namespace std;


PLEARN_IMPLEMENT_OBJECT(
    ProjectionErrorVariable,
    "Computes the projection error of a set of vectors on a non-orthogonal basis.\n",
    "The first input is a set of n_dim vectors (possibly seen as a single vector of their concatenation) f_i, each in R^n\n"
    "The second input is a set of T vectors (possibly seen as a single vector of their concatenation) t_j, each in R^n\n"
    "There are several options that control which kind of projection error is actually computed:\n"
    "If !use_subspace_distance (the recommended setting), the output is\n"
    "    (1/T) sum_j min_w || t_j - sum_i w_i f_i ||^2 / ||t_j||^2\n"
    "where the denominator can be eliminated (not recommended) by turning off the\n"
    "normalize_by_neighbor_distance option. In this expression, w is a local\n"
    "n_dim-vector that is optimized analytically.\n"
    "\n"
    "If the 'ordered_vectors' option is set, the gradient is not computed exactly,\n"
    "but in such a way as to induce a natural ordering among the vectors f_i.\n"
    "For each f_i, the above criterion is applied using a projection that\n"
    "involves only the first i vectors f_1...f_i. In this way the first vector f_1\n"
    "tries to *explain* the vectors t_j as well as possible with a single dimension,\n"
    "and the vector f_2 learns to *explain* what f_1 did not already predict, etc...\n"
    "When this option is set, we also choose the w_i in the same greedy way, starting\n"
    "from w_1 chosen to minimize the projection error wrt f_1, w_2 chosen to minimize the\n"
    "residual projection error left on f_2, etc... Hence the cost minimized wrt f_k on neighbor j is\n"
    "  ||t_j - sum_{i<=k} w_i f_i||^2 / ||t_j||^2\n"
    "(this cost is minimized to choose w_k, and to get a gradient on f_k as well).\n"
    "In that case no SVD is used; instead one obtains an analytic solution for w_k:\n"
    "  w_k = (t_j . f_k - sum_{i<k} w_i f_i . f_k)/||f_k||^2.\n"
    "The output produced by fprop is (1/T) sum_j || t_j - sum_i w_i f_i ||^2 / ||t_j||^2\n"
    "where the w_i are chosen as in the previous equation.\n"
    "However, if use_subspace_distance (not recommended), the output is\n"
    "     min_{w,u}  || sum_i w_i f_i  -  sum_j u_j t_j ||^2 .\n"
    "In both cases, if norm_penalization>0, an extra term is added:\n"
    "    norm_penalization * sum_i (||f_i||^2 - 1)^2.\n"
    "The 'epsilon' and 'regularization' options are used to regularize the SVD-based matrix\n"
    "inversion involved in minimizing for w: only the singular values of F' that are\n"
    "above 'epsilon' are inverted (and their singular vectors considered), and they\n"
    "are incremented by 'regularization' before inverting.\n"
    );
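
// For example, with n=2, n_dim=1, a single basis vector f_1=(1,0) and a
// single neighbor t_1=(1,1): the optimal weight is
// w_1 = (t_1 . f_1)/||f_1||^2 = 1, the residual is t_1 - w_1 f_1 = (0,1),
// and the normalized projection error is ||(0,1)||^2 / ||t_1||^2 = 1/2.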

ProjectionErrorVariable::ProjectionErrorVariable(Variable* input1, Variable* input2, int n_, 
                                                 bool normalize_by_neighbor_distance_, 
                                                 bool use_subspace_distance_, 
                                                 real norm_penalization_, real epsilon_, 
                                                 real regularization_, bool ordered_vectors_)
    : inherited(input1, input2, 1, 1), n(n_), use_subspace_distance(use_subspace_distance_), 
      normalize_by_neighbor_distance(normalize_by_neighbor_distance_), norm_penalization(norm_penalization_), 
      epsilon(epsilon_),  regularization(regularization_), ordered_vectors(ordered_vectors_)
{
    build_();
}
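
// A hypothetical usage sketch (illustrative only; 'f', 't' and 'n' are
// assumed names, following the usual PLearn Var conventions):
//   Var f = ...;  // n_dim x n matrix whose rows are the basis vectors f_i
//   Var t = ...;  // T x n matrix whose rows are the vectors t_j
//   Var cost = new ProjectionErrorVariable(f, t, n);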

void
ProjectionErrorVariable::build()
{
    inherited::build();
    build_();
}

void
ProjectionErrorVariable::build_()
{
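    // Infer n_dim (number of basis vectors f_i) and T (number of vectors t_j)
    // from the input shapes; a 1 x N or N x 1 input is interpreted as the
    // concatenation of several n-dimensional vectors, which requires n to be
    // specified.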
    if (input1 && input2) {
        if ((input1->length()==1 && input1->width()>1) || 
            (input1->width()==1 && input1->length()>1))
        {
            if (n<0) PLERROR("ProjectionErrorVariable: Either the input should be matrices or n should be specified\n");
            n_dim = input1->size()/n;
            if (n_dim*n != input1->size())
                PLERROR("ProjectionErrorVariable: the first input size should be an integer multiple of n");
        }
        else 
            n_dim = input1->length();
        if ((input2->length()==1 && input2->width()>1) || 
            (input2->width()==1 && input2->length()>1))
        {
            if (n<0) PLERROR("ProjectionErrorVariable: Either the input should be matrices or n should be specified\n");
            T = input2->size()/n;
            if (T*n != input2->size())
                PLERROR("ProjectionErrorVariable: the second input size should be an integer multiple of n");
        }
        else 
            T = input2->length();

        F = input1->value.toMat(n_dim,n);
        dF = input1->gradient.toMat(n_dim,n);
        TT = input2->value.toMat(T,n);
        if (n<0) n = input1->width();
        if (input2->width()!=n)
            PLERROR("ProjectionErrorVariable: the two arguments have inconsistent sizes");
        if (n_dim>n)
            PLERROR("ProjectionErrorVariable: n_dim should be less than data dimension n");
        if (!use_subspace_distance)
        {
            if (ordered_vectors)
            {
                norm_f.resize(n_dim);
            }
            else
            {
                V.resize(n_dim,n_dim);
                Ut.resize(n,n);
                B.resize(n_dim,n);
                VVt.resize(n_dim,n_dim);
            }
            fw_minus_t.resize(T,n);
            w.resize(T,n_dim);
            one_over_norm_T.resize(T);
        }
        else 
        {
            wwuu.resize(n_dim+T);
            ww = wwuu.subVec(0,n_dim);
            uu = wwuu.subVec(n_dim,T);
            wwuuM = wwuu.toMat(1,n_dim+T);
            rhs.resize(n_dim+T);
            rhs.subVec(0,n_dim).fill(-1.0);
            A.resize(n_dim+T,n_dim+T);
            A11 = A.subMat(0,0,n_dim,n_dim);
            A12 = A.subMat(0,n_dim,n_dim,T);
            A21 = A.subMat(n_dim,0,T,n_dim);
            A22 = A.subMat(n_dim,n_dim,T,T);
            Tu.resize(n);
            FT.resize(n_dim+T,n);
            FT1 = FT.subMat(0,0,n_dim,n);
            FT2 = FT.subMat(n_dim,0,T,n);
            Ut.resize(n,n);
            V.resize(n_dim+T,n_dim+T);
        }
        fw.resize(n);
        if (norm_penalization>0)
            norm_err.resize(n_dim);
    }
}


void ProjectionErrorVariable::recomputeSize(int& len, int& wid) const
{
    len = 1;
    wid = 1;
}

void ProjectionErrorVariable::fprop()
{
    // Let F be the input1 matrix with rows f_i.
    // IF use_subspace_distance THEN
    //  We need to solve the system
    //    | FF'  -FT'| |w|   | 1 |
    //    |          | | | = |   |
    //    |-TF'   TT'| |u|   | 0 |
    //  in (w,u), and then scale both down by ||w|| so as to enforce ||w||=1.
    //
    // ELSE IF !ordered_vectors
    //  We need to solve the system 
    //     F F' w_j = F t_j
    //  for each t_j in order to find the solution w of
    //    min_{w_j} || t_j - sum_i w_{ji} f_i ||^2
    //  for each j. Then sum over j the above square errors.
    //  Let F' = U S V' be the SVD of F'. Then
    //    w_j = (F F')^{-1} F t_j = (V S U' U S V')^{-1} F t_j = V S^{-2} V' F t_j.
    //  Note that we can pre-compute
    //    B = V S^{-2} V' F = V S^{-1} U'
    //  and
    //    w_j = B t_j is our solution.
    // ELSE (ordered_vectors && !use_subspace_distance)
    //  for each j
    //   for each k
    //     w_{jk} = (t_j . f_k - sum_{i<k} w_i f_i . f_k)/||f_k||^2
    //  cost = sum_j || t_j - sum_i w_i f_i||^2 / ||t_j||^2
    // ENDIF
    //
    // if  norm_penalization>0 then also add the following term:
    //   norm_penalization * sum_i (||f_i||^2 - 1)^2
    //
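    // In the branches below, w(j) holds the projection weights for neighbor j,
    // fw the reconstruction sum_i w_ji f_i, fw_minus_t(j) the residual, and
    // one_over_norm_T[j] the normalization factor 1/||t_j||^2.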
    real cost = 0;
    if (use_subspace_distance)
    {
        // use SVD of (F' -T')
        FT1 << F;
        multiply(FT2,TT,static_cast<real>(-1.0));
        lapackSVD(FT, Ut, S, V);
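        // Note: the block system matrix above equals FT FT' with FT = [F; -T]
        // (F stacked over -T), so the SVD of FT computed here yields its
        // eigendecomposition FT FT' = V diag(S^2) V', which is inverted
        // component-wise in the loop below.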
        wwuu.clear();
        for (int k=0;k<S.length();k++)
        {
            real s_k = S[k];
            real sv = s_k + regularization;
            real coef = 1/(sv * sv);
            if (s_k>epsilon) // ignore the components whose singular value is too small (more robust solution)
            {
                real sum_first_elements = 0;
                for (int j=0;j<n_dim;j++) 
                    sum_first_elements += V(j,k);
                for (int i=0;i<n_dim+T;i++)
                    wwuu[i] += V(i,k) * sum_first_elements * coef;
            }
        }
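
        // wwuu now holds the regularized pseudo-inverse solution
        // V diag(1/(S+regularization)^2) V' e, where e equals 1 on the first
        // n_dim coordinates and 0 elsewhere, using only the components with
        // S[k] > epsilon.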

        static bool debugging=false;
        if (debugging)
        {
            productTranspose(A11,F,F);
            productTranspose(A12,F,TT);
            A12 *= -1.0;
            Vec res(ww.length());
            product(res,A11,ww);
            productAcc(res,A12,uu);
            res -= static_cast<real>(1.0);
            cout << "norm of error in w equations: " << norm(res) << endl;
            Vec res2(uu.length());
            transposeProduct(res2,A12,ww);
            productTranspose(A22,TT,TT);
            productAcc(res2,A22,uu);
            cout << "norm of error in u equations: " << norm(res2) << endl;
        }
        // scale w and u so that ||w|| = 1; note that the code divides by
        // sum(ww) rather than by the Euclidean norm(ww) (left commented out)
        real wnorm = sum(ww); // norm(ww);
        wwuu *= 1.0/wnorm;

        // compute the cost = ||F'w - T'u||^2
        transposeProduct(fw,F,ww);
        transposeProduct(Tu,TT,uu);
        fw -= Tu;
        cost = pownorm(fw);
    }
    else // PART THAT IS REALLY USED STARTS HERE
        if (ordered_vectors)
        {
            // compute 1/||f_k||^2 into norm_f
            for (int k=0;k<n_dim;k++)
            {
                Vec fk = F(k);
                norm_f[k] = 1.0/pownorm(fk);
            }
            for(int j=0; j<T;j++)
            {
                Vec tj = TT(j);
                Vec wj = w(j);
                // w_{jk} = (t_j . f_k - sum_{i<k} w_i f_i . f_k)/||f_k||^2
                for (int k=0;k<n_dim;k++)
                {
                    Vec fk = F(k);
                    real s = dot(tj,fk); 
                    for (int i=0;i<k;i++)
                        s -= wj[i] * dot(F(i),fk);
                    wj[k] = s * norm_f[k];
                }
                transposeProduct(fw, F, wj); // fw = sum_i w_ji f_i = z_m
                Vec fw_minus_tj = fw_minus_t(j);
                substract(fw,tj,fw_minus_tj); // -z_n = z_m - z
                if (normalize_by_neighbor_distance) // THAT'S THE ONE WHICH WORKS WELL:
                {
                    one_over_norm_T[j] = 1.0/pownorm(tj); // = 1/||z||^2
                    cost += sumsquare(fw_minus_tj)*one_over_norm_T[j]; // = ||z_n||^2 / ||z||^2
                }
                else
                    cost += sumsquare(fw_minus_tj);
            }
        }
        else
        {
            static Mat F_copy;
            F_copy.resize(F.length(),F.width());
            F_copy << F;
            // N.B. this is the SVD of F'
            lapackSVD(F_copy, Ut, S, V);
            B.clear();
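            // accumulate B = V S^{-1} U' over the components whose singular
            // value exceeds epsilon, so that w_j = B t_j below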
            for (int k=0;k<S.length();k++)
            {
                real s_k = S[k];
                if (s_k>epsilon) // ignore the components whose singular value is too small (more robust solution)
                { 
                    s_k += regularization;
                    real coef = 1/s_k;
                    for (int i=0;i<n_dim;i++)
                    {
                        real* Bi = B[i];
                        for (int j=0;j<n;j++)
                            Bi[j] += V(i,k)*Ut(k,j)*coef;
                    }
                }
            }
            //  now we have B, we can compute the w's and the cost
            for(int j=0; j<T;j++)
            {
                Vec tj = TT(j);

                Vec wj = w(j);
                product(wj, B, tj); // w_j = B * t_j = projection weights for neighbor j
                transposeProduct(fw, F, wj); // fw = sum_i w_ji f_i = z_m

                Vec fw_minus_tj = fw_minus_t(j);
                substract(fw,tj,fw_minus_tj); // -z_n = z_m - z
                if (normalize_by_neighbor_distance) // THAT'S THE ONE WHICH WORKS WELL:
                {
                    one_over_norm_T[j] = 1.0/pownorm(tj); // = 1/||z||^2
                    cost += sumsquare(fw_minus_tj)*one_over_norm_T[j]; // = ||z_n||^2 / ||z||^2
                }
                else
                    cost += sumsquare(fw_minus_tj);
            }
        }
    if (norm_penalization>0)
    {
        real penalization=0;
        for (int i=0;i<n_dim;i++)
        {
            Vec f_i = F(i);
            norm_err[i] = pownorm(f_i)-1;
            penalization += norm_err[i]*norm_err[i];
        }
        cost += norm_penalization*penalization;
    }
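    // average the accumulated cost over the T neighbor vectors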
    value[0] = cost/real(T);
}


void ProjectionErrorVariable::bprop()
{
    // compute dcost/dF and accumulate it into input1's gradient (dF),
    // keeping w fixed.
    // 
    // IF use_subspace_distance
    //   dcost/dF = w (F'w - T'u)'
    //
    // ELSE IF ordered_vectors
    //   dcost_k/df_k = sum_j 2(sum_{i<=k} w_i f_i  - t_j) w_k/||t_j||^2
    // 
    // ELSE
    //   dcost/dfw = 2 (fw - t_j)/||t_j||^2
    //   dfw/df_i = w_i 
    //  so 
    //   dcost/df_i = sum_j 2(fw - t_j) w_i/||t_j||^2
    //
    // (the 1/||t_j||^2 factors are dropped when normalize_by_neighbor_distance
    // is off, and in the non-subspace branches every contribution is scaled
    // by 1/T to match the averaging of the cost in fprop)
    //
    // IF norm_penalization>0
    //   add the following to the gradient of f_i:
    //     norm_penalization*2*(||f_i||^2 - 1)*f_i
    // N.B. WE CONSIDER THE input2 (t_j's) TO BE FIXED AND DO NOT 
    // COMPUTE THE GRADIENT WRT input2. IF THE USE OF THIS
    // OBJECT CHANGES, THIS MAY HAVE TO BE REVISED.
    //

    if (use_subspace_distance)
    {
        externalProductScaleAcc(dF,ww,fw,gradient[0]);
        if (norm_penalization>0)
            for (int i=0;i<n_dim;i++)
            {
                Vec df_i = dF(i); // n-vector
                multiplyAcc(df_i, F(i), gradient[0]*norm_penalization*2*norm_err[i]);
            }
    }
    else if (ordered_vectors)
    {
        for (int j=0;j<T;j++)
        {
            fw.clear();
            Vec wj = w(j);
            Vec fw_minus_tj = fw_minus_t(j); // n-vector
            Vec tj = TT(j);
            for (int k=0;k<n_dim;k++)
            {
                Vec f_k = F(k); // n-vector
                Vec df_k = dF(k); // n-vector
                multiplyAcc(fw,f_k,wj[k]);
                substract(fw,tj,fw_minus_tj);
                if (normalize_by_neighbor_distance)
                    multiplyAcc(df_k,fw_minus_tj,gradient[0] * wj[k] * 2 * one_over_norm_T[j]/real(T));
                else
                    multiplyAcc(df_k,fw_minus_tj,gradient[0] * wj[k] * 2/real(T));
            }
        }
    }
    else
    {
        for (int j=0;j<T;j++)
        {
            Vec fw_minus_tj = fw_minus_t(j); // n-vector
            Vec wj = w(j);
            for (int i=0;i<n_dim;i++)
            {
                Vec df_i = dF(i); // n-vector
                if (normalize_by_neighbor_distance)
                    multiplyAcc(df_i, fw_minus_tj, gradient[0] * wj[i]*2*one_over_norm_T[j]/real(T));
                else
                    multiplyAcc(df_i, fw_minus_tj, gradient[0] * wj[i]*2/real(T));
                if (norm_penalization>0)
                    multiplyAcc(df_i, F(i), gradient[0]*norm_penalization*2*norm_err[i]/real(T));
            }
        }
    }
}


void ProjectionErrorVariable::symbolicBprop()
{
    PLERROR("Not implemented");
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :