SoftSoftMaxVariable.cc
// -*- C++ -*-

// SoftSoftMaxVariable.cc
//
// Copyright (C) 2007 Simon Lemieux, Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Simon Lemieux, Pascal Vincent

#include "SoftSoftMaxVariable.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    SoftSoftMaxVariable,
    "Kind of softmax variable",
    "Let X:=input1, A:=input2\nThen output(n,k) = exp(X(n,k))/(sum_j{exp[X(n,j)+A(k,j)]})"
    );
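
// A minimal usage sketch (the Var names below are hypothetical, not from the original code):
// given an (n,d) matrix of pre-activations X and a (d,d) offset matrix U with a zero
// diagonal, the variable outputs H with H(i,k) = exp(X(i,k)) / sum_j exp(X(i,j)+U(k,j)).
//
//   Var X(n, d);                            // pre-activations, one row per example
//   Var U(d, d);                            // pairwise offsets; diagonal must be 0
//   Var H = new SoftSoftMaxVariable(X, U);  // output has the same shape as X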


/*
All matrices must use contiguous storage.
X is an (n,d) matrix
U is a (d,d) matrix
out is an (n,d) matrix

Beware: you must ensure that U_kk = 0 prior to calling these functions, as they assume this holds.

*/

// Two versions are provided: a two-pass version and a single-pass version.
// The two-pass version does a first pass to find the max (no transcendental involved),
// and a second pass where it calls a single transcendental (exp) per term.
// The single-pass version repeatedly calls a scalar logadd, which involves two
// transcendentals (exp and log) per term, and possibly yields a numerically more accurate result.
// I don't know which is faster.

#define SOFTSOFTMAX_SAFELOG safelog
#define SOFTSOFTMAX_EXP exp
#define SOFTSOFTMAX_SAFEEXP safeexp
#define SOFTSOFTMAX_LOGADD(a,b) ( ((a)>(b)) ? (a)+log1p(exp((b)-(a))) : (b)+log1p(exp((a)-(b))) )
// #define SOFTSOFTMAX_LOGADD(a,b) ( ((a)>(b)) ? (a)+softplus((b)-(a)) : (b)+softplus((a)-(b)) )
// #define SOFTSOFTMAX_LOGADD(a,b) logadd(a,b)
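// Note (added for clarity): SOFTSOFTMAX_LOGADD(a,b) computes log(exp(a)+exp(b)) in a
// numerically stable way, using the identity
//   log(exp(a)+exp(b)) = max(a,b) + log1p(exp(-|a-b|)),
// so exp is only ever applied to a non-positive argument and cannot overflow.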


// Singlepass version does a stable logadd computation by repeatedly calling scalar logadd (as in a normal reduction)
void softsoftmax_fprop_singlepass_version(int n, int d,
                                          const real* __restrict__ const X,
                                          const real* __restrict__ const U,
                                          real* __restrict__ const H)
{
  int Hpos = 0;
  int xistart = 0;
  for(int i=0; i<n; i++, xistart+=d)
    {

      int upos  = 0;
      for(int j=0; j<d; j++)
        {
          real Xij = X[xistart+j];

          real res = X[xistart] + U[upos++] - Xij;
          for(int xpos=xistart+1; xpos<xistart+d; xpos++, upos++)
            {
              real newelem = X[xpos] + U[upos] - Xij;
              res = SOFTSOFTMAX_LOGADD(res,newelem);
            }

          H[Hpos++] = SOFTSOFTMAX_SAFEEXP(-res);
        }
    }
}

// Twopass version does a stable logadd computation by first finding the max
void softsoftmax_fprop_twopass_version(int n, int d,
                                       const real* __restrict__ const X,
                                       const real* __restrict__ const U,
                                       real* __restrict__ const H)
{
  int Hpos = 0;
  int xistart = 0;
  for(int i=0; i<n; i++, xistart+=d)
    {
      int uposstart  = 0;
      for(int j=0; j<d; j++, uposstart+=d)
        {
          real maxelem = X[xistart] + U[uposstart];
          for(int xpos=xistart+1, upos=uposstart+1; xpos<xistart+d; xpos++, upos++)
            {
              real elem = X[xpos] + U[upos];
              if(elem>maxelem)
                maxelem = elem;
            }
          real res = 0;
          for(int xpos=xistart, upos=uposstart; xpos<xistart+d; xpos++, upos++)
            res += SOFTSOFTMAX_EXP(X[xpos] + U[upos] - maxelem);
          res = maxelem + SOFTSOFTMAX_SAFELOG(res) - X[xistart+j];

          H[Hpos++] = SOFTSOFTMAX_SAFEEXP(-res);
        }
    }
}

// Same two-pass scheme as above, but also outputs logH (reused by bprop for numerical stability)
void softsoftmax_with_log_twopass_version(int n, int d,
                                          const real* __restrict__ const X,
                                          const real* __restrict__ const U,
                                          real* __restrict__ const logH,
                                          real* __restrict__ const H)
{
  int Hpos = 0;
  int xistart = 0;
  for(int i=0; i<n; i++, xistart+=d)
    {
      int uposstart  = 0;
      for(int j=0; j<d; j++, uposstart+=d)
        {
          real maxelem = X[xistart] + U[uposstart];
          for(int xpos=xistart+1, upos=uposstart+1; xpos<xistart+d; xpos++, upos++)
            {
              real elem = X[xpos] + U[upos];
              if(elem>maxelem)
                maxelem = elem;
            }
          real res = 0;
          for(int xpos=xistart, upos=uposstart; xpos<xistart+d; xpos++, upos++)
            res += SOFTSOFTMAX_EXP(X[xpos] + U[upos] - maxelem);
          res = -(maxelem + SOFTSOFTMAX_SAFELOG(res) - X[xistart+j]);

          logH[Hpos] = res;
          H[Hpos] = SOFTSOFTMAX_SAFEEXP(res);
          Hpos++;
        }
    }
}


// Hardapprox version uses only the max of the denominator terms
void softsoftmax_fprop_hardapprox_version(int n, int d,
                                          const real* __restrict__ const X,
                                          const real* __restrict__ const U,
                                          real* __restrict__ const H)
{
  int Hpos = 0;
  int xistart = 0;
  for(int i=0; i<n; i++, xistart+=d)
    {
      int uposstart  = 0;
      for(int j=0; j<d; j++, uposstart+=d)
        {
          real maxelem = X[xistart] + U[uposstart];
          for(int xpos=xistart+1, upos=uposstart+1; xpos<xistart+d; xpos++, upos++)
            {
              real elem = X[xpos] + U[upos];
              if(elem>maxelem)
                maxelem = elem;
            }
          H[Hpos++] = SOFTSOFTMAX_SAFEEXP(X[xistart+j]-maxelem);
        }
    }
}


void softsoftmax_bprop(int n, int d,
                       const real* __restrict__ const X,
                       const real* __restrict__ const U,
                       const real* __restrict__ const logH,
                       const real* __restrict__ const H_gr,
                       real* __restrict__ const X_gr,
                       real* __restrict__ const U_gr)
{
    // Beware: this must be passed logH and H_gr, where H_gr is the gradient on H, not on logH.

    // Note: X, logH, H_gr and X_gr all have the same shape (n,d).
    // Offset positions are the same for these matrices, so we won't prefix the variables holding them.
    // However, the variable holding the offset of position (k,j) in U and U_gr (which are (d,d) matrices) is called Ukj_pos.
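
    // Gradient sketch (added for clarity; derived from the fprop definition, assuming U's diagonal is 0):
    //   H(i,k) = exp(X(i,k)) / sum_j exp(X(i,j)+U(k,j))
    //   dH(i,k)/dX(i,j) = delta_{kj}*H(i,k) - H(i,k)^2 * exp(X(i,j)+U(k,j)-X(i,k))
    //   dH(i,k)/dU(k,j) =                   - H(i,k)^2 * exp(X(i,j)+U(k,j)-X(i,k))
    // val_k below is the common term -H_gr(i,k) * H(i,k)^2 * exp(U(k,j)+X(i,j)-X(i,k)),
    // with H(i,k)^2 computed as exp(2*logH(i,k)); the extra delta_{kj}*H(i,k) contribution
    // is added to X_gr after the k loop, and the diagonal of U_gr is skipped since U(k,k)
    // is held at 0.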

    for(int i=0, row_i_pos=0; i<n; i++, row_i_pos+=d)
    {
        for(int j=0; j<d; j++)
        {
            int ij = row_i_pos+j; // ij index offset
            real sumk = 0;
            for(int k=0, Ukj_pos=j; k<d; k++, Ukj_pos+=d)
            {
                // Ukj_pos = k*d+j;
                int ik = row_i_pos+k; // ik index offset
                real l_ik = logH[ik];
                real val_k = -H_gr[ik]*SOFTSOFTMAX_SAFEEXP(U[Ukj_pos] + l_ik+l_ik - X[ik] + X[ij]);
                if(k!=j)
                    U_gr[Ukj_pos] += val_k;
                sumk += val_k;
            }
            real h_ij = SOFTSOFTMAX_SAFEEXP(logH[ij]);
            X_gr[ij] += H_gr[ij]*h_ij + sumk;
        }
    }
}
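
// Illustrative sketch (not part of the original code): the bprop above can be checked
// against the fprop with a finite-difference test, roughly as follows:
//
//   Mat X(n,d), U(d,d), H(n,d), logH(n,d), H_gr(n,d), X_gr(n,d), U_gr(d,d);
//   // fill X, U (with a zero diagonal) and H_gr with small random values, X_gr and U_gr with zeros
//   softsoftmax_with_log_twopass_version(n, d, X.data(), U.data(), logH.data(), H.data());
//   softsoftmax_bprop(n, d, X.data(), U.data(), logH.data(), H_gr.data(),
//                     X_gr.data(), U_gr.data());
//   // then perturb one entry X(i,j) by +/- epsilon, rerun the fprop to get H_plus and H_minus,
//   // and compare sum(H_gr * (H_plus - H_minus)) / (2*epsilon) with the entry X_gr(i,j).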


// constructor from input variables.
SoftSoftMaxVariable::SoftSoftMaxVariable(Variable* input1, Variable* input2)
    : inherited(input1, input2, input1->length(), input1->width())
{
    build_();
}


void SoftSoftMaxVariable::recomputeSize(int& l, int& w) const
{
    // ### usual code to put here is:

    if (input1) {
        l = input1->length(); // the computed length of this Var
        w = input1->width(); // the computed width
    } else
        l = w = 0;
}

// ### computes value from input1 and input2 values
void SoftSoftMaxVariable::fprop()
{
    if(input1->matValue.isNotContiguous() || input2->matValue.isNotContiguous())
        PLERROR("SoftSoftMaxVariable input matrices must be contiguous.");

    int n = input1->matValue.length();
    int d = input1->matValue.width();

    if(input2->matValue.length()!=d || input2->matValue.width()!=d)
        PLERROR("SoftSoftMaxVariable second input matrix (U) must be a square matrix whose length and width match the width of the first input matrix");

    // make sure U's diagonal is 0
    Mat Umat = input2->matValue;
    for(int i=0; i<d; i++)
        Umat(i,i) = 0;

    const real* const X = input1->matValue.data();
    const real* const U = input2->matValue.data();
    real* const H = matValue.data();
    logH_mat.resize(n,d);
    real* const logH = logH_mat.data();

    softsoftmax_with_log_twopass_version(n, d, X, U, logH, H);
    // perr << "Twopass version: " << endl << matValue << endl;
    // softsoftmax_fprop_singlepass_version(n, d, X, U, H);
    // perr << "Singlepass version: " << endl << matValue << endl;
    // perr << "--------------------------------------" << endl;
}

// ### computes input1 and input2 gradients from gradient
void SoftSoftMaxVariable::bprop()
{
    int n = input1->matValue.length();
    int d = input1->matValue.width();
    const real* const X = input1->matValue.data();
    const real* const U = input2->matValue.data();
    // const real* const H = matValue.data();
    // For numerical reasons we use logH, which was computed and stored during fprop, rather than the H stored in matValue.
    const real* const logH = logH_mat.data();

    const real* const H_gr = matGradient.data();
    real* const X_gr = input1->matGradient.data();
    real* const U_gr = input2->matGradient.data();

    softsoftmax_bprop(n, d, X, U, logH, H_gr,
                      X_gr, U_gr);

    /*
    // Old (naive) implementation, kept for reference:
    Mat X = input1->matValue,
        A = input2->matValue,
        grad_X = input1->matGradient,
        grad_A = input2->matGradient;

    real temp;

    // for each example (row) of X
    for (int n=0; n<X.length(); n++)
        // each coordinate of an example // corresponds to the gradient
        for (int k=0; k<X.width(); k++)
            // same example, also a coordinate // corresponds to an example
            for (int j=0; j<X.width(); j++)
            {
                temp = matGradient(n,j)*matValue(n,j)*matValue(n,j)*safeexp(X(n,k)+A(j,k))/safeexp(X(n,j));

                if(k==j)
                    grad_X(n,k) += matGradient(n,j)*matValue(n,k)*(1.-matValue(n,k));
                else
                    grad_X(n,k) -= temp;

                grad_A(j,k) -= temp;
            }
    */
}

// ### You can implement these methods:
// void SoftSoftMaxVariable::bbprop() {}
// void SoftSoftMaxVariable::symbolicBprop() {}
// void SoftSoftMaxVariable::rfprop() {}


// ### Nothing to add here, simply calls build_
void SoftSoftMaxVariable::build()
{
    inherited::build();
    build_();
}

void SoftSoftMaxVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### If you want to deepCopy a Var field:
    // varDeepCopyField(somevariable, copies);

    // ### Remove this line when you have fully implemented this method.
    PLERROR("SoftSoftMaxVariable::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}

void SoftSoftMaxVariable::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    // ### ex:
    // declareOption(ol, "myoption", &SoftSoftMaxVariable::myoption,
    //               OptionBase::buildoption,
    //               "Help text describing this option");
    // ...

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void SoftSoftMaxVariable::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of
    // ###    all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning"
    // ###    options have been modified.
    // ### You should assume that the parent class' build_() has already been
    // ### called.
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :