// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************      
   * $Id: SparseIncrementalAffineTransformVariable.cc 1442 2004-04-27 15:58:16Z morinf $
   * This file is part of the PLearn library.
   ******************************************************* */

#include "SparseIncrementalAffineTransformVariable.h"

namespace PLearn {
using namespace std;
template<class T>
void absargmax(const TMat<T>& mat, int& maxi, int& maxj)
{
  #ifdef BOUNDCHECK
  if(mat.length()==0 || mat.width()==0)
    PLERROR("In void absargmax(const TMat<T>& mat, int& maxi, int& maxj): mat has 0 size");
  #endif
  T* m_i = mat.data();
  maxi=0;
  maxj=0;
  double maxval = fabs(m_i[0]);
  for(int i=0; i<mat.length(); i++, m_i+=mat.mod())
    for(int j=0; j<mat.width(); j++)
      if(fabs(m_i[j])>maxval)
        {
          maxval = fabs(m_i[j]);
          maxi = i;
          maxj = j;
        }
}
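
// Minimal usage sketch for absargmax (illustrative only; the matrix and
// values below are hypothetical): after the call, (maxi, maxj) indexes the
// entry with the largest absolute value, here -4.0 at row 1, column 0.
/*
  TMat<real> m(2, 2);
  m(0,0) = 1.0;  m(0,1) = -2.0;
  m(1,0) = -4.0; m(1,1) = 3.0;
  int maxi, maxj;
  absargmax(m, maxi, maxj);   // maxi == 1, maxj == 0
*/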


PLEARN_IMPLEMENT_OBJECT(SparseIncrementalAffineTransformVariable,
                        "Affine transformation of a vector variable, with weights that are sparse and incrementally added.",
                        "The first row of the transformation matrix holds the biases and the\n"
                        "remaining rows hold the weights. Weights are activated incrementally\n"
                        "(add_n_weights at a time), chosen according to the estimated correlation\n"
                        "between each input component and each output gradient component.");

SparseIncrementalAffineTransformVariable::SparseIncrementalAffineTransformVariable(Variable* vec, Variable* transformation, real the_running_average_prop, real the_start_grad_prop)
    : inherited(vec, transformation, 
                (vec->size() == 1) ? transformation->width() : (vec->isRowVec() ? 1 : transformation->width()),
                (vec->size() == 1) ? 1 : (vec->isRowVec() ? transformation->width() : 1)),
      n_grad_samples(0), has_seen_input(0), n_weights(0), add_n_weights(0), start_grad_prop(the_start_grad_prop), running_average_prop(the_running_average_prop)
{
    build_();
}
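
// Hypothetical construction sketch (dimensions and hyper-parameter values are
// illustrative, not taken from PLearn code): the transformation Var must have
// one more row than the input has elements, the extra first row being the bias.
/*
  Var input(10);                 // 10-dimensional input vector
  Var transform(11, 5);          // 1 bias row + 10 weight rows, 5 outputs
  Var output = new SparseIncrementalAffineTransformVariable(input, transform,
                                                            0.999,  // running_average_prop
                                                            0.1);   // start_grad_prop
*/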


void SparseIncrementalAffineTransformVariable::declareOptions(OptionList& ol)
{
  declareOption(ol, "start_grad_prop", &SparseIncrementalAffineTransformVariable::start_grad_prop, OptionBase::buildoption, 
                "Proportion of the average incoming gradient used to initialize the added weights\n");

  declareOption(ol, "add_n_weights", &SparseIncrementalAffineTransformVariable::add_n_weights, OptionBase::buildoption, 
                "Number of weights to add after the next bprop\n");

  declareOption(ol, "positions", &SparseIncrementalAffineTransformVariable::positions, OptionBase::learntoption, 
                "Positions of the non-zero weights\n");

  declareOption(ol, "sums", &SparseIncrementalAffineTransformVariable::sums, OptionBase::learntoption, 
                "Sums of the incoming gradients\n");

  declareOption(ol, "input_average", &SparseIncrementalAffineTransformVariable::input_average, OptionBase::learntoption, 
                "Average of the input\n");

  declareOption(ol, "n_grad_samples", &SparseIncrementalAffineTransformVariable::n_grad_samples, OptionBase::learntoption, 
                "Number of incoming gradients summed\n");

  declareOption(ol, "has_seen_input", &SparseIncrementalAffineTransformVariable::has_seen_input, OptionBase::learntoption, 
                "Indication that this variable has seen at least one input sample\n");

  declareOption(ol, "n_weights", &SparseIncrementalAffineTransformVariable::n_weights, OptionBase::learntoption, 
                "Number of weights in the affine transform\n");

  inherited::declareOptions(ol);
}

void
SparseIncrementalAffineTransformVariable::build()
{
    inherited::build();
    build_();
}

void
SparseIncrementalAffineTransformVariable::build_()
{
    // input1 is vec from the constructor, input2 is the transformation matrix
    if (input1 && !input1->isVec())
      PLERROR("In SparseIncrementalAffineTransformVariable: expecting a vector Var (row or column) as first argument");
    if (input1 && input2 && input1->size() != input2->length()-1)
      PLERROR("In SparseIncrementalAffineTransformVariable: transformation matrix (%d+1) and input vector (%d) have incompatible lengths",input2->length()-1,input1->size());

    if(n_grad_samples == 0)
    {
      sums.resize(input2->length()-1,input2->width());
      sums.clear();
    }

    if(!has_seen_input)
    {
      input_average.resize(input2->length()-1);
      input_average.clear();
      positions.resize(input2->length(),input2->width());
      positions.clear();
      sc_input.resize(input1->size());
      sc_grad.resize(input2->width());
      sc_input_grad.resize(input2->length()-1,input2->width());

      // This may not be necessary: reset all statistics collectors
      for(int i=0; i< input1->size(); i++)
      {
        sc_input[i].forget();
        for(int j=0; j< input2->width(); j++)
        {
          if(i==0) sc_grad[j].forget();
          sc_input_grad(i,j).forget();
        }
      }
    }

    temp_grad.resize(input2->length()-1,input2->width());
    temp_grad.clear();
}

void SparseIncrementalAffineTransformVariable::recomputeSize(int& l, int& w) const
{ 
    if (input1 && input2) {
        l = input1->isRowVec() ? 1 : input2->width();
        w = input1->isColumnVec() ? 1 : input2->width(); 
    } else
        l = w = 0;
}
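// Shape convention (a reading of the code above, stated for clarity): a row
// input yields a 1 x m row output and a column input yields an m x 1 column
// output, where m = input2->width() is the number of outputs.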


void SparseIncrementalAffineTransformVariable::fprop()
{
  if( n_weights >= (input2->matValue.length()-1)*input2->matValue.width())
  {
    value << input2->matValue.firstRow();
    Mat lintransform = input2->matValue.subMatRows(1,input2->length()-1);
    transposeProductAcc(value, lintransform, input1->value);
  }
  else
  {
    value.clear();
    /*
    if(has_seen_input)
      exponentialMovingAverageUpdate(input_average,input1->value,running_average_prop);
    else
    {
      input_average << input1->value;
      has_seen_input = true;
    }
    */

    value << input2->matValue.firstRow();
    Mat lintransform = input2->matValue.subMatRows(1,input2->length()-1);
    transposeProductAcc(value, lintransform, input1->value);

    /*
    for(int i=0; i<positions.length(); i++)
    {
      position_i = positions[i];
      value[i] = position_i.length() != 0 ? input2->matValue(0,i) : 0;
      for(int j=0; j<position_i.length(); j++)
      {
        value[i] += input2->matValue(position_i[j]+1,i) * input1->value[position_i[j]];
      }
    }
    */
  }
}
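
// Note on the code above: both branches of fprop compute the same dense
// affine map, value = b + W^T x, with b the first row of input2->matValue
// (the biases), W its remaining rows (the weights) and x = input1->value.
// In the current code the sparsity pattern only constrains bprop, through
// gradient masking; fprop always uses the full weight matrix.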


void SparseIncrementalAffineTransformVariable::bprop()
{

  if( n_weights >= (input2->matValue.length()-1)*input2->matValue.width())
  {
    Mat&  afftr = input2->matValue;
    int l = afftr.length();
    // Vec bias = afftr.firstRow();
    Mat lintr = afftr.subMatRows(1,l-1);

    Mat& afftr_g = input2->matGradient;
    Vec bias_g = afftr_g.firstRow();
    Mat lintr_g = afftr_g.subMatRows(1,l-1);

    bias_g += gradient;    
    if(!input1->dont_bprop_here)      
      productAcc(input1->gradient, lintr, gradient);
    externalProductAcc(lintr_g, input1->value, gradient);
  }
  else
  {
    // Update stats collectors
    for(int i=0; i< input1->size(); i++)
    {
      sc_input[i].update(input1->value[i]);
      for(int j=0; j< input2->width(); j++)
      {
        if(i==0) sc_grad[j].update(gradient[j]);
        sc_input_grad(i,j).update(input1->value[i]*gradient[j]);
      }
    }
    
    // Update sums of gradient
    //externalProductAcc(sums, (input1->value-input_average)/input_stddev, gradient);
    n_grad_samples++;
    int l = input2->matValue.length();


      // Set the sums for already added weights to 0
      /*
      for(int i=0; i<positions.length(); i++)
      {
        position_i = positions[i];
        for(int j=0; j<position_i.length(); j++)
          sums(position_i[j],i) = 0;
      }
      */

      //sums *= positions.subMatRows(1,l-1);      

      if(add_n_weights > 0)
      {
        // Watch out! This is not compatible with the previous version!
        sums.clear();

        Mat positions_lin = positions.subMatRows(1,l-1); 
        real* sums_i = sums.data();
        real* positions_lin_i = positions_lin.data();
        for(int i=0; i<sums.length(); i++, sums_i+=sums.mod(),positions_lin_i+=positions_lin.mod())
          for(int j=0; j<sums.width(); j++)
          {
            //sums_i[j] *= 1-positions_lin_i[j];
            if(positions_lin_i[j] == 0)
            {
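              // The value stored below is the log of the absolute empirical
              // correlation between input i and output gradient j:
              //   log|E[x_i g_j] - E[x_i] E[g_j]| - log(std(x_i) std(g_j))
              // so candidate weights whose inputs correlate most strongly
              // with the output gradients get the largest scores.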
              sums_i[j] = safeflog(fabs(sc_input_grad(i,j).mean() - sc_input[i].mean() * sc_grad[j].mean()))
                - safeflog( sc_input[i].stddev() * sc_grad[j].stddev());
            }
          }

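        // Greedily activate the add_n_weights candidates with the largest
        // absolute score. Setting positions(0,maxj) also activates the bias
        // of output maxj the first time one of its weights is added.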
        while(add_n_weights >0 && n_weights < (input2->matValue.length()-1)*input2->matValue.width())
        {
          add_n_weights--;
          n_weights++; 
          int maxi, maxj;
          absargmax(sums,maxi,maxj);
          //input2->matValue(maxi+1,maxj) = start_grad_prop * sums(maxi,maxj)/n_grad_samples;
          //positions[maxj].push_back(maxi);
          if(positions(0,maxj) == 0)
            positions(0,maxj) = 1;
          positions(maxi+1,maxj) = 1;
          sums(maxi,maxj) = 0;
        }
        // Initialize gradient cumulator
        n_grad_samples=0;
        sums.clear();

        for(int i=0; i< input1->size(); i++)
        {
          sc_input[i].forget();
          for(int j=0; j< input2->width(); j++)
          {
            if(i==0) sc_grad[j].forget();
            sc_input_grad(i,j).forget();
          }
        }
      }
    // Do actual bprop
    /*
    for(int i=0; i<positions.length(); i++)
    {
    position_i = positions[i];
    input2->matGradient(0,i) += position_i.length() != 0 ? gradient[i] : 0;
    for(int j=0; j<position_i.length(); j++)
    {
    input2->matGradient(position_i[j]+1,i) += gradient[i] * input1->value[position_i[j]];
    if(!input1->dont_bprop_here) 
    input1->gradient[position_i[j]] += gradient[i] * input2->matValue(position_i[j]+1,i);
    }
    }
    */
    Mat&  afftr = input2->matValue;
    // Vec bias = afftr.firstRow();
    Mat lintr = afftr.subMatRows(1,l-1);

    Mat& afftr_g = input2->matGradient;
    Vec bias_g = afftr_g.firstRow();

    multiplyAcc(bias_g,gradient,positions.firstRow());    
    if(!input1->dont_bprop_here)      
      productAcc(input1->gradient, lintr, gradient);
    externalProduct(temp_grad, input1->value, gradient);
    temp_grad *= positions.subMatRows(1,l-1);
    afftr_g.subMatRows(1,l-1) += temp_grad;
  }
}
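
// Summary of the gradients computed above (a restatement of the code, with b
// the bias row, W the weight rows, x the input and g the incoming gradient):
//   dense case  : db += g          dW += x g^T           dx += W g
//   sparse case : db += g .* p0    dW += (x g^T) .* p    dx += W g
// where p is the 0/1 positions mask and p0 its bias row: in the sparse regime
// only activated entries of the transformation ever receive gradient, while
// the input gradient still flows through the full weight matrix.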


void SparseIncrementalAffineTransformVariable::symbolicBprop()
{
  PLERROR("SparseIncrementalAffineTransformVariable::symbolicBprop() not implemented");
}

void SparseIncrementalAffineTransformVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
  inherited::makeDeepCopyFromShallowCopy(copies);
  deepCopyField(positions, copies);
  deepCopyField(sums, copies);
  deepCopyField(input_average, copies);  
  //deepCopyField(position_i, copies);
  deepCopyField(temp_grad,copies);
  deepCopyField(sc_input,copies);
  deepCopyField(sc_grad,copies);
  deepCopyField(sc_input_grad,copies);
}

void SparseIncrementalAffineTransformVariable::reset()
{
  /*
  for(int i=0; i<positions.length(); i++)
  {
    positions[i].clear();
    positions[i].resize(0);
  }
  */
  positions.clear();
  sums.clear();
  n_grad_samples = 0;
  input_average.clear();
  has_seen_input = false;
  n_weights = 0;
  for(int i=0; i< input1->size(); i++)
  {
    sc_input[i].forget();
    for(int j=0; j< input2->width(); j++)
    {
      if(i==0) sc_grad[j].forget();
      sc_input_grad(i,j).forget();
    }
  }
}



} // end of namespace PLearn