RBMMatrixTransposeConnection.cc
// -*- C++ -*-

// RBMMatrixTransposeConnection.cc
//
// Copyright (C) 2007 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#include "RBMMatrixTransposeConnection.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMMatrixTransposeConnection,
    "RBMConnection which uses the transpose of some other "
    "RBMMatrixConnection's weights",
    "");

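// Usage sketch (hypothetical driver code, not part of this file): tying a
// down-going connection to an existing up-going RBMMatrixConnection. The
// sizes and learning rate below are illustrative only.
//
//     PP<RBMMatrixConnection> up_conn = new RBMMatrixConnection();
//     up_conn->down_size = 50;    // e.g. visible units
//     up_conn->up_size = 100;     // e.g. hidden units
//     up_conn->build();
//
//     PP<RBMMatrixTransposeConnection> down_conn =
//         new RBMMatrixTransposeConnection( up_conn,
//                                           0.01, /* learning rate */
//                                           true  /* call_build_ */ );
//     // down_conn->down_size == 100, down_conn->up_size == 50, and
//     // down_conn->weights shares storage with up_conn->weights.
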
RBMMatrixTransposeConnection::RBMMatrixTransposeConnection(
    PP<RBMMatrixConnection> the_rbm_matrix_connection,
    real the_learning_rate,
    bool call_build_) :
    inherited(the_learning_rate, call_build_),
    rbm_matrix_connection(the_rbm_matrix_connection),
    learn_scale( false ),
    scale( 1.0 )
{
    if (call_build_)
        build_();
}

void RBMMatrixTransposeConnection::declareOptions(OptionList& ol)
{
    declareOption(ol, "rbm_matrix_connection",
                  &RBMMatrixTransposeConnection::rbm_matrix_connection,
                  OptionBase::buildoption,
                  "RBMMatrixConnection from which the weights are taken");

    declareOption(ol, "learn_scale",
                  &RBMMatrixTransposeConnection::learn_scale,
                  OptionBase::buildoption,
                  "Indication that the scale of the weight matrix should be "
                  "learned.\n");

    declareOption(ol, "scale",
                  &RBMMatrixTransposeConnection::scale,
                  OptionBase::learntoption,
                  "Learned scale for weight matrix.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    redeclareOption(ol, "up_size", &RBMConnection::up_size,
                    OptionBase::learntoption,
                    "Is set to rbm_matrix_connection->down_size.");
    redeclareOption(ol, "down_size", &RBMConnection::down_size,
                    OptionBase::learntoption,
                    "Is set to rbm_matrix_connection->up_size.");
}

void RBMMatrixTransposeConnection::build_()
{
    if( !rbm_matrix_connection )
        return;

    // If we have a random_gen and rbm_matrix_connection does not, share it
    if( random_gen && !(rbm_matrix_connection->random_gen) )
    {
        rbm_matrix_connection->random_gen = random_gen;
        rbm_matrix_connection->forget();
    }
    weights = rbm_matrix_connection->weights;
    down_size = rbm_matrix_connection->up_size;
    up_size = rbm_matrix_connection->down_size;

    // For compatibility with OnlineLearningModule inherited functions
    input_size = down_size;
    output_size = up_size;

    weights_pos_stats.resize( down_size, up_size );
    weights_neg_stats.resize( down_size, up_size );

    if( momentum != 0. )
        weights_inc.resize( down_size, up_size );

    clearStats();
}
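
// Concrete example of the size bookkeeping in build_() above (illustrative
// numbers): if the wrapped RBMMatrixConnection has up_size = 100 and
// down_size = 50, its weight matrix is 100 x 50. This connection then reuses
// that same 100 x 50 matrix as 'weights', with down_size = 100 and
// up_size = 50, so going up here multiplies by the transpose of the wrapped
// connection's matrix.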

void RBMMatrixTransposeConnection::build()
{
    inherited::build();
    build_();
}


void RBMMatrixTransposeConnection::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(weights, copies);
    deepCopyField(rbm_matrix_connection, copies);
    deepCopyField(weights_pos_stats, copies);
    deepCopyField(weights_neg_stats, copies);
    deepCopyField(weights_inc, copies);
}

void RBMMatrixTransposeConnection::accumulatePosStats( const Vec& down_values,
                                                       const Vec& up_values )
{
    // weights_pos_stats += down_values * up_values'
    externalProductAcc( weights_pos_stats, down_values, up_values );

    pos_count++;
}

void RBMMatrixTransposeConnection::accumulateNegStats( const Vec& down_values,
                                                       const Vec& up_values )
{
    // weights_neg_stats += down_values * up_values'
    externalProductAcc( weights_neg_stats, down_values, up_values );

    neg_count++;
}

void RBMMatrixTransposeConnection::update()
{
    if( learn_scale )
        PLERROR("In RBMMatrixTransposeConnection::update(): not implemented "
                "for learned scale");
    // updates parameters
    // weights -= learning_rate * (weights_pos_stats/pos_count
    //                             - weights_neg_stats/neg_count)
    real pos_factor = -learning_rate / pos_count;
    real neg_factor = learning_rate / neg_count;

    int l = weights.length();
    int w = weights.width();

    real* w_i = weights.data();
    real* wps_i = weights_pos_stats.data();
    real* wns_i = weights_neg_stats.data();
    int w_mod = weights.mod();
    int wps_mod = weights_pos_stats.mod();
    int wns_mod = weights_neg_stats.mod();

    if( momentum == 0. )
    {
        // no need to use weights_inc
        for( int i=0 ; i<l ; i++, w_i+=w_mod, wps_i+=wps_mod, wns_i+=wns_mod )
            for( int j=0 ; j<w ; j++ )
                w_i[j] += pos_factor * wps_i[j] + neg_factor * wns_i[j];
    }
    else
    {
        // ensure that weights_inc has the right size
        weights_inc.resize( l, w );

        // The update rule becomes:
        // weights_inc = momentum * weights_inc
        //               - learning_rate * (weights_pos_stats/pos_count
        //                                  - weights_neg_stats/neg_count);
        // weights += weights_inc;
        real* winc_i = weights_inc.data();
        int winc_mod = weights_inc.mod();
        for( int i=0 ; i<l ; i++, w_i += w_mod, wps_i += wps_mod,
                             wns_i += wns_mod, winc_i += winc_mod )
            for( int j=0 ; j<w ; j++ )
            {
                winc_i[j] = momentum * winc_i[j]
                    + pos_factor * wps_i[j] + neg_factor * wns_i[j];
                w_i[j] += winc_i[j];
            }
    }

    clearStats();
}
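
// Typical contrastive-divergence driver for the statistics-based update
// above (a hypothetical sketch, not code from this file): v_0/h_0 are
// positive-phase values and v_1/h_1 negative-phase values of the chain.
//
//     connection->setLearningRate( 0.01 );
//     connection->accumulatePosStats( v_0, h_0 );
//     connection->accumulateNegStats( v_1, h_1 );
//     connection->update();   // applies the step and calls clearStats()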

// Instead of using the accumulated statistics, we assume that a single
// Markov chain has been run, and we update the parameters from the first
// four values of the chain: v_0, h_0, v_1, h_1.
void RBMMatrixTransposeConnection::update( const Vec& pos_down_values, // v_0
                                           const Vec& pos_up_values,   // h_0
                                           const Vec& neg_down_values, // v_1
                                           const Vec& neg_up_values )  // h_1
{
    if( learn_scale )
        PLERROR("In RBMMatrixTransposeConnection::update(): not implemented "
                "for learned scale");

    PLASSERT_MSG( rbm_matrix_connection, "RBMMatrixTransposeConnection must "
                  "be given an rbm_matrix_connection.\n");
    // 'weights' is stored with this connection's down units as rows and up
    // units as columns (it is the wrapped connection's matrix), so:
    // weights -= learning_rate * ( v_0 h_0' - v_1 h_1' );
    // or:
    // weights[i][j] += learning_rate * (v_1[i] h_1[j] - v_0[i] h_0[j]);

    int l = weights.length();
    int w = weights.width();
    PLASSERT( pos_down_values.length() == l );
    PLASSERT( neg_down_values.length() == l );
    PLASSERT( pos_up_values.length() == w );
    PLASSERT( neg_up_values.length() == w );

    real* w_i = weights.data();
    real* pdv_i = pos_down_values.data();
    real* ndv_i = neg_down_values.data();
    real* puv = pos_up_values.data();
    real* nuv = neg_up_values.data();
    int w_mod = weights.mod();

    if( momentum == 0. )
    {
        for( int i=0 ; i<l ; i++, w_i += w_mod, pdv_i++, ndv_i++ )
            for( int j=0 ; j<w ; j++ )
                w_i[j] += learning_rate * (*ndv_i * nuv[j] - *pdv_i * puv[j]);
    }
    else
    {
        // ensure that weights_inc has the right size
        weights_inc.resize( l, w );

        // The update rule becomes:
        // weights_inc = momentum * weights_inc
        //               - learning_rate * ( v_0 h_0' - v_1 h_1' );
        // weights += weights_inc;

        real* winc_i = weights_inc.data();
        int winc_mod = weights_inc.mod();
        for( int i=0 ; i<l ; i++, w_i += w_mod, winc_i += winc_mod,
                             pdv_i++, ndv_i++ )
            for( int j=0 ; j<w ; j++ )
            {
                winc_i[j] = momentum * winc_i[j]
                    + learning_rate * (*ndv_i * nuv[j] - *pdv_i * puv[j]);
                w_i[j] += winc_i[j];
            }
    }
}

void RBMMatrixTransposeConnection::clearStats()
{
    weights_pos_stats.clear();
    weights_neg_stats.clear();

    pos_count = 0;
    neg_count = 0;
}

void RBMMatrixTransposeConnection::computeProduct( int start, int length,
                                                   const Vec& activations,
                                                   bool accumulate ) const
{
    PLASSERT( activations.length() == length );
    PLASSERT_MSG( rbm_matrix_connection, "RBMMatrixTransposeConnection must "
                  "be given an rbm_matrix_connection.\n");

    if( going_up )
    {
        PLASSERT( start+length <= up_size );
        // activations[i-start] += sum_j weights(j,i) input_vec[j]

        if( accumulate )
            transposeProductAcc( activations,
                                 weights.subMatColumns(start,length),
                                 input_vec );
        else
            transposeProduct( activations,
                              weights.subMatColumns(start,length),
                              input_vec );
    }
    else
    {
        PLASSERT( start+length <= down_size );
        // activations[i-start] += sum_j weights(i,j) input_vec[j]
        if( accumulate )
            productAcc( activations,
                        weights.subMatRows(start,length),
                        input_vec );
        else
            product( activations,
                     weights.subMatRows(start,length),
                     input_vec );
    }
    if( learn_scale )
        activations *= scale;
}
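
// Sketch of how this method is typically driven (hypothetical, following the
// RBMConnection interface): the input vector is set first, then the product
// is computed into 'activations'.
//
//     connection->setAsDownInput( v );   // sets input_vec, going_up = true
//     Vec act( connection->up_size );
//     connection->computeProduct( 0, connection->up_size, act );
//     // act now holds weights' * v (times scale when learn_scale is true)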

void RBMMatrixTransposeConnection::computeProducts(int start, int length,
                                                   Mat& activations,
                                                   bool accumulate ) const
{
    PLASSERT_MSG( rbm_matrix_connection, "RBMMatrixTransposeConnection must "
                  "be given an rbm_matrix_connection.\n");
    activations.resize(inputs_mat.length(), length);
    if( going_up )
    {
        PLASSERT( start+length <= up_size );
        // activations(k, i-start) += sum_j weights(j,i) inputs_mat(k, j)

        if( accumulate )
            productAcc(activations,
                    inputs_mat,
                    weights.subMatColumns(start,length));
        else
            product(activations,
                    inputs_mat,
                    weights.subMatColumns(start,length));
    }
    else
    {
        PLASSERT( start+length <= down_size );
        // activations(k, i-start) += sum_j weights(i,j) inputs_mat(k, j)
        if( accumulate )
            productTransposeAcc(activations,
                    inputs_mat,
                    weights.subMatRows(start,length) );
        else
            productTranspose(activations,
                    inputs_mat,
                    weights.subMatRows(start,length) );
    }

    if( learn_scale )
        activations *= scale;
}

void RBMMatrixTransposeConnection::bpropUpdate(const Vec& input,
                                               const Vec& output,
                                               Vec& input_gradient,
                                               const Vec& output_gradient,
                                               bool accumulate)
{
    PLASSERT( input.size() == down_size );
    PLASSERT( output.size() == up_size );
    PLASSERT( output_gradient.size() == up_size );
    PLASSERT_MSG( rbm_matrix_connection, "RBMMatrixTransposeConnection must "
                  "be given an rbm_matrix_connection.\n");

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == down_size,
                      "Cannot resize input_gradient AND accumulate into it" );

        // input_gradient += weights * output_gradient
        // ('weights' already stores the transpose of the wrapped
        // connection's matrix, with down units as rows)
        productAcc( input_gradient, weights, output_gradient );
    }
    else
    {
        input_gradient.resize( down_size );

        // input_gradient = weights * output_gradient
        product( input_gradient, weights, output_gradient );
    }

    // weights -= learning_rate * input * output_gradient'
    externalProductScaleAcc( weights, input, output_gradient, -learning_rate );
    if( learn_scale )
    {
        // scale -= learning_rate * output_gradient . (weights' * input)
        real* in = input.data();
        real* out_g;
        real* wj;
        for( int j=0; j<weights.length(); j++ )
        {
            out_g = output_gradient.data();
            wj = weights[j];
            for( int i=0; i<weights.width(); i++ )
                scale -= learning_rate * out_g[i] * wj[i] * in[j];
        }
    }
}
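
// Derivation behind the learn_scale step above (and its batch version
// below): with a learned scale the output is output = scale * weights' *
// input, so d(output[i]) / d(scale) = sum_j weights(j,i) * input[j], and the
// gradient step is
//     scale -= learning_rate
//              * sum_i output_gradient[i] * sum_j weights(j,i) * input[j],
// which is exactly what the double loop computes.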

void RBMMatrixTransposeConnection::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                               Mat& input_gradients,
                                               const Mat& output_gradients,
                                               bool accumulate)
{
    PLASSERT( inputs.width() == down_size );
    PLASSERT( outputs.width() == up_size );
    PLASSERT( output_gradients.width() == up_size );
    PLASSERT_MSG( rbm_matrix_connection, "RBMMatrixTransposeConnection must "
                  "be given an rbm_matrix_connection.\n");

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == down_size &&
                      input_gradients.length() == inputs.length(),
                      "Cannot resize input_gradients and accumulate into it" );

        // input_gradients += output_gradients * weights'
        productTransposeAcc(input_gradients, output_gradients, weights);
    }
    else
    {
        input_gradients.resize(inputs.length(), down_size);
        // input_gradients = output_gradients * weights'
        productTranspose(input_gradients, output_gradients, weights);
    }

    // weights -= learning_rate/n * inputs' * output_gradients
    transposeProductScaleAcc(weights, inputs, output_gradients,
                             -learning_rate / inputs.length(), real(1));

    if( learn_scale )
    {
        // accumulate the scale gradient over the mini-batch
        for( int t=0; t<inputs.length(); t++ )
        {
            real* in = inputs[t];
            real* out_g;
            real* wj;
            for( int j=0; j<weights.length(); j++ )
            {
                out_g = output_gradients[t];
                wj = weights[j];
                for( int i=0; i<weights.width(); i++ )
                    scale -= learning_rate * out_g[i] * wj[i] * in[j];
            }
        }
    }
}


void RBMMatrixTransposeConnection::forget()
{
    PLASSERT_MSG( rbm_matrix_connection, "RBMMatrixTransposeConnection must "
                  "be given an rbm_matrix_connection.\n");
    clearStats();
    if( !random_gen )
    {
        PLWARNING("RBMMatrixTransposeConnection: cannot forget() without"
                  " random_gen");
        return;
    }
    if( !(rbm_matrix_connection->random_gen) )
        rbm_matrix_connection->random_gen = random_gen;
    rbm_matrix_connection->forget();
    if( learn_scale )
        scale = 1;
}


/* THIS METHOD IS OPTIONAL
void RBMMatrixTransposeConnection::finalize()
{
}
*/

int RBMMatrixTransposeConnection::nParameters() const
{
    return weights.size();
}

Vec RBMMatrixTransposeConnection::makeParametersPointHere(const Vec& global_parameters)
{
    PLASSERT_MSG( rbm_matrix_connection, "RBMMatrixTransposeConnection must "
                  "be given an rbm_matrix_connection.\n");
    Vec ret = rbm_matrix_connection->makeParametersPointHere(global_parameters);
    weights = rbm_matrix_connection->weights;
    return ret;
}
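
// Sketch of the intended use (hypothetical driver code): after this call the
// shared weight matrix aliases a slice of the global parameter vector.
//
//     Vec all_params( connection->nParameters() );
//     Vec remaining = connection->makeParametersPointHere( all_params );
//     // 'remaining' is the unused tail of all_params; updating all_params
//     // in place now updates the shared weights as well.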


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :