// -*- C++ -*-

// RBMSparse1DMatrixConnection.cc
//
// Copyright (C) 2008 Jerome Louradour
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Jerome Louradour

#include "RBMSparse1DMatrixConnection.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMSparse1DMatrixConnection,
    "RBM connection with sparse weights, designed for 1D inputs.",
    "");
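
// Each of the up_size hidden units is connected to a contiguous window of
// filter_size consecutive inputs, so only up_size x filter_size weights are
// stored instead of a dense up_size x down_size matrix.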

RBMSparse1DMatrixConnection::RBMSparse1DMatrixConnection( real the_learning_rate ) :
    inherited(the_learning_rate),
    filter_size(-1),
    enforce_positive_weights(false)
{
}

void RBMSparse1DMatrixConnection::declareOptions(OptionList& ol)
{
    declareOption(ol, "filter_size", &RBMSparse1DMatrixConnection::filter_size,
                  OptionBase::buildoption,
                  "Length of each filter. If -1, input_size is used (equivalent to RBMMatrixConnection).");

    declareOption(ol, "enforce_positive_weights", &RBMSparse1DMatrixConnection::enforce_positive_weights,
                  OptionBase::buildoption,
                  "Whether to constrain the weights to be non-negative.");

    declareOption(ol, "step_size", &RBMSparse1DMatrixConnection::step_size,
                  OptionBase::learntoption,
                  "Offset between the starts of two consecutive filters.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

////////////////////
// declareMethods //
////////////////////
void RBMSparse1DMatrixConnection::declareMethods(RemoteMethodMap& rmm)
{
    // Insert a backpointer to remote methods; note that this is different from
    // declareOptions().
    rmm.inherited(inherited::_getRemoteMethodMap_());
    declareMethod(
        rmm, "getWeights", &RBMSparse1DMatrixConnection::getWeights,
        (BodyDoc("Returns the full weights (including 0s).\n"),
         RetDoc ("Matrix of weights (n_hidden x input_size)")));
}

void RBMSparse1DMatrixConnection::build_()
{
    if( up_size <= 0 || down_size <= 0 )
        return;

    if( filter_size < 0 )
        filter_size = down_size;

    // Spread the filter starts evenly over the input.
    if( up_size > 1 )
        step_size = (int)((real)(down_size-filter_size)/(real)(up_size-1));
    else
        step_size = 0; // a single filter simply starts at position 0

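    // Example (illustrative values): with down_size = 10, up_size = 3 and
    // filter_size = 4, step_size = (10-4)/(3-1) = 3, so the three filters
    // cover the input windows [0,4), [3,7) and [6,10).
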
    PLASSERT( filter_size <= down_size );

    bool needs_forget = false; // do we need to reinitialize the parameters?

    if( weights.length() != up_size ||
        weights.width() != filter_size )
    {
        weights.resize( up_size, filter_size );
        needs_forget = true;
    }

    weights_pos_stats.resize( up_size, filter_size );
    weights_neg_stats.resize( up_size, filter_size );

    if( momentum != 0. )
        weights_inc.resize( up_size, filter_size );

    if( needs_forget ) {
        forget();
    }

    clearStats();
}

void RBMSparse1DMatrixConnection::build()
{
    RBMConnection::build();
    build_();
}

int RBMSparse1DMatrixConnection::filterStart(int idx) const
{
    return step_size*idx;
}

int RBMSparse1DMatrixConnection::filterSize(int idx) const
{
    return filter_size;
}
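
// All filters share the same length, and consecutive filters overlap whenever
// step_size < filter_size. Since step_size is rounded down, the last filter
// may end before down_size, leaving the last few inputs unconnected.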

Mat RBMSparse1DMatrixConnection::getWeights() const
{
    Mat w( up_size, down_size );
    w.clear();
    for ( int i=0; i<up_size; i++)
        w(i).subVec( filterStart(i), filterSize(i) ) << weights(i);
    return w;
}
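
// Note: getWeights() returns a dense up_size x down_size matrix, with zeros
// outside each filter's window; the compact storage is the 'weights' member
// (up_size x filter_size).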

/////////////////////
// accumulateStats //
/////////////////////
void RBMSparse1DMatrixConnection::accumulatePosStats( const Mat& down_values,
                                                      const Mat& up_values )
{
    int mbs=down_values.length();
    PLASSERT(up_values.length()==mbs);
    // weights_pos_stats(i) += down_values(:, window_i)' * up_values(:, i)
    for ( int i=0; i<up_size; i++)
        transposeProductAcc( weights_pos_stats(i),
                             down_values.subMatColumns( filterStart(i), filterSize(i) ),
                             up_values.column(i).toVec() );
    pos_count+=mbs;
}

void RBMSparse1DMatrixConnection::accumulateNegStats( const Mat& down_values,
                                                      const Mat& up_values )
{
    int mbs=down_values.length();
    PLASSERT(up_values.length()==mbs);
    // weights_neg_stats(i) += down_values(:, window_i)' * up_values(:, i)
    for ( int i=0; i<up_size; i++)
        transposeProductAcc( weights_neg_stats(i),
                             down_values.subMatColumns( filterStart(i), filterSize(i) ),
                             up_values.column(i).toVec() );
    neg_count+=mbs;
}

/////////////////////
// computeProducts //
/////////////////////
void RBMSparse1DMatrixConnection::computeProducts(int start, int length,
                                                  Mat& activations,
                                                  bool accumulate ) const
{
    PLASSERT( activations.width() == length );
    activations.resize(inputs_mat.length(), length);
    if( going_up )
    {
        PLASSERT( start+length <= up_size );
        // activations(k, i-start) += sum_j weights(i,j) inputs_mat(k, j)
        if( accumulate )
            for (int i=start; i<start+length; i++)
                productAcc( activations.column(i-start).toVec(),
                            inputs_mat.subMatColumns( filterStart(i), filterSize(i) ),
                            weights(i) );
        else
            for (int i=start; i<start+length; i++)
                product( activations.column(i-start).toVec(),
                         inputs_mat.subMatColumns( filterStart(i), filterSize(i) ),
                         weights(i) );
    }
    else
    {
        PLASSERT( start+length <= down_size );
        if( !accumulate )
            activations.clear();
        // activations(k, i-start) += sum_j weights(j,i) inputs_mat(k, j)
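        // Filters can overlap, so several hidden units may contribute to the
        // same visible unit: accumulate into a buffer spanning all down_size
        // columns, then copy the requested window back out.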
        Mat all_activations(inputs_mat.length(), down_size);
        all_activations.clear(); // ensure the accumulation starts from zero
        all_activations.subMatColumns( start, length ) << activations;
        for (int i=0; i<up_size; i++)
        {
            externalProductAcc( all_activations.subMatColumns( filterStart(i), filterSize(i) ),
                                inputs_mat.column(i).toVec(),
                                weights(i) );
        }
        activations << all_activations.subMatColumns( start, length );
    }
}

///////////
// fprop //
///////////
void RBMSparse1DMatrixConnection::fprop(const Vec& input, const Mat& rbm_weights,
                                        Vec& output) const
{
    PLERROR("RBMSparse1DMatrixConnection::fprop not implemented.");
}

/////////////////
// bpropUpdate //
/////////////////
void RBMSparse1DMatrixConnection::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                              Mat& input_gradients,
                                              const Mat& output_gradients,
                                              bool accumulate)
{
    PLASSERT( inputs.width() == down_size );
    PLASSERT( outputs.width() == up_size );
    PLASSERT( output_gradients.width() == up_size );

    if( accumulate )
        PLASSERT_MSG( input_gradients.width() == down_size &&
                      input_gradients.length() == inputs.length(),
                      "Cannot resize input_gradients and accumulate into it" );
    else {
        input_gradients.resize(inputs.length(), down_size);
        input_gradients.clear();
    }

    for (int i=0; i<up_size; i++) {
        int filter_start= filterStart(i), length= filterSize(i);

        // input_gradients = output_gradients * weights
        externalProductAcc( input_gradients.subMatColumns( filter_start, length ),
                            output_gradients.column(i).toVec(),
                            weights(i));

        // weights -= learning_rate/n * output_gradients' * inputs
        transposeProductScaleAcc( weights(i),
                                  inputs.subMatColumns( filter_start, length ),
                                  output_gradients.column(i).toVec(),
                                  -learning_rate / inputs.length(), real(1));

        if( enforce_positive_weights )
            for (int j=0; j<filter_size; j++)
                weights(i,j)= max( real(0), weights(i,j) );
    }

    if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
        applyWeightPenalty();
}
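
// Note that bpropUpdate, like bpropAccUpdate below, both back-propagates the
// gradient and immediately applies the minibatch-averaged gradient step to
// the weights.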


////////////////////
// bpropAccUpdate //
////////////////////
void RBMSparse1DMatrixConnection::bpropAccUpdate(const TVec<Mat*>& ports_value,
                                                 const TVec<Mat*>& ports_gradient)
{
    //TODO: add weights as port?
    PLASSERT( ports_value.length() == nPorts()
              && ports_gradient.length() == nPorts() );

    Mat* down = ports_value[0];
    //Mat* up = ports_value[1];
    Mat* down_grad = ports_gradient[0];
    Mat* up_grad = ports_gradient[1];

    PLASSERT( down && !down->isEmpty() );
    //PLASSERT( up && !up->isEmpty() );

    int batch_size = down->length();
    //PLASSERT( up->length() == batch_size );

    // If we have up_grad
    if( up_grad && !up_grad->isEmpty() )
    {
        // down_grad should not be provided
        PLASSERT( !down_grad || down_grad->isEmpty() );
        PLASSERT( up_grad->length() == batch_size );
        PLASSERT( up_grad->width() == up_size );

        bool compute_down_grad = false;
        if( down_grad && down_grad->isEmpty() )
        {
            compute_down_grad = true;
            PLASSERT( down_grad->width() == down_size );
            down_grad->resize(batch_size, down_size);
            down_grad->clear(); // externalProductAcc accumulates, so start from zero
        }

        for (int i=0; i<up_size; i++) {
            int filter_start= filterStart(i), length= filterSize(i);

            // propagate gradient:
            // down_grad = up_grad * weights
            if( compute_down_grad )
                externalProductAcc( down_grad->subMatColumns( filter_start, length ),
                                    up_grad->column(i).toVec(),
                                    weights(i));

            // update weights:
            // weights -= learning_rate/n * up_grad' * down
            transposeProductScaleAcc( weights(i),
                                      down->subMatColumns( filter_start, length ),
                                      up_grad->column(i).toVec(),
                                      -learning_rate / batch_size, real(1));

            if( enforce_positive_weights )
                for (int j=0; j<filter_size; j++)
                    weights(i,j)= max( real(0), weights(i,j) );
        }
    }
    else if( down_grad && !down_grad->isEmpty() )
    {
        PLERROR("down-up gradient not implemented in RBMSparse1DMatrixConnection::bpropAccUpdate.");
    }
    else
        PLCHECK_MSG( false,
                     "Unknown port configuration" );

    if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
        applyWeightPenalty();
}

void RBMSparse1DMatrixConnection::update( const Mat& pos_down_values, // v_0
                                          const Mat& pos_up_values,   // h_0
                                          const Mat& neg_down_values, // v_1
                                          const Mat& neg_up_values )  // h_1
{
    // weights += learning_rate * ( h_0 v_0' - h_1 v_1' );
    // or:
    // weights[i][j] += learning_rate * (h_0[i] v_0[j] - h_1[i] v_1[j]);

    PLASSERT( pos_up_values.width() == weights.length() );
    PLASSERT( neg_up_values.width() == weights.length() );
    PLASSERT( pos_down_values.width() == down_size );
    PLASSERT( neg_down_values.width() == down_size );

    if( momentum == 0. )
    {
        // We use the average gradient over a mini-batch.
        real avg_lr = learning_rate / pos_down_values.length();

        for (int i=0; i<up_size; i++) {
            int filter_start= filterStart(i), length= filterSize(i);

            transposeProductScaleAcc( weights(i),
                                      pos_down_values.subMatColumns( filter_start, length ),
                                      pos_up_values.column(i).toVec(),
                                      avg_lr, real(1));

            transposeProductScaleAcc( weights(i),
                                      neg_down_values.subMatColumns( filter_start, length ),
                                      neg_up_values.column(i).toVec(),
                                      -avg_lr, real(1));

            if( enforce_positive_weights )
                for (int j=0; j<filter_size; j++)
                    weights(i,j)= max( real(0), weights(i,j) );
        }
    }
    else
        PLERROR("RBMSparse1DMatrixConnection::update minibatch with momentum - Not implemented");

    if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
        applyWeightPenalty();
}

////////////
// forget //
////////////
// Reset the parameters to the state they would be BEFORE starting training.
void RBMSparse1DMatrixConnection::forget()
{
    clearStats();
    if( initialization_method == "zero" )
        weights.clear();
    else
    {
        if( !random_gen ) {
            PLWARNING( "RBMSparse1DMatrixConnection: cannot forget() without"
                       " random_gen" );
            return;
        }
        real d = 1. / max( filter_size, up_size );
        if( initialization_method == "uniform_sqrt" )
            d = sqrt( d );
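        // Fan-in style scaling: each hidden unit only sees filter_size
        // inputs, so filter_size (rather than down_size) sets the scale of
        // the uniform initialization.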

        if( enforce_positive_weights )
            random_gen->fill_random_uniform( weights, real(0), d );
        else
            random_gen->fill_random_uniform( weights, -d, d );
    }
    L2_n_updates = 0;
}

//! Returns the number of free parameters, i.e. the number of stored weights.
int RBMSparse1DMatrixConnection::nParameters() const
{
    return weights.size();
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :