// -*- C++ -*-

// RBMConv2DConnection.cc
//
// Copyright (C) 2006 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#define PL_LOG_MODULE_NAME "RBMConv2DConnection"

#include "RBMConv2DConnection.h"
#include <plearn/math/TMat_maths.h>
#include <plearn/math/convolutions.h>
#include <plearn/io/pl_log.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMConv2DConnection,
    "Filter between two linear layers of a 2D convolutional RBM",
    "");

RBMConv2DConnection::RBMConv2DConnection( real the_learning_rate ) :
    inherited(the_learning_rate),
    down_image_length(-1),
    down_image_width(-1),
    up_image_length(-1),
    up_image_width(-1),
    kernel_step1(1),
    kernel_step2(1),
    kernel_length(-1),
    kernel_width(-1)
{
}

void RBMConv2DConnection::declareOptions(OptionList& ol)
{
    declareOption(ol, "down_image_length",
                  &RBMConv2DConnection::down_image_length,
                  OptionBase::buildoption,
                  "Length of the down image");

    declareOption(ol, "down_image_width",
                  &RBMConv2DConnection::down_image_width,
                  OptionBase::buildoption,
                  "Width of the down image");

    declareOption(ol, "up_image_length",
                  &RBMConv2DConnection::up_image_length,
                  OptionBase::buildoption,
                  "Length of the up image");

    declareOption(ol, "up_image_width",
                  &RBMConv2DConnection::up_image_width,
                  OptionBase::buildoption,
                  "Width of the up image");

    declareOption(ol, "kernel_step1", &RBMConv2DConnection::kernel_step1,
                  OptionBase::buildoption,
                  "\"Vertical\" step of the convolution");

    declareOption(ol, "kernel_step2", &RBMConv2DConnection::kernel_step2,
                  OptionBase::buildoption,
                  "\"Horizontal\" step of the convolution");

    declareOption(ol, "kernel", &RBMConv2DConnection::kernel,
                  OptionBase::learntoption,
                  "Matrix containing the convolution kernel (filter)");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    redeclareOption(ol, "down_size",
                    &RBMConv2DConnection::down_size,
                    OptionBase::learntoption,
                    "Equal to down_image_length * down_image_width");

    redeclareOption(ol, "up_size",
                    &RBMConv2DConnection::up_size,
                    OptionBase::learntoption,
                    "Equal to up_image_length * up_image_width");
}
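
// A hypothetical configuration sketch (not part of the original file): in a
// PLearn script, the build options declared above would be set e.g. as
//
//     RBMConv2DConnection(
//         down_image_length = 28;
//         down_image_width = 28;
//         up_image_length = 24;
//         up_image_width = 24;
//         kernel_step1 = 1;
//         kernel_step2 = 1;
//     )
//
// With these (assumed) values, build_() below derives a 5x5 kernel.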

void RBMConv2DConnection::build_()
{
    MODULE_LOG << "build_() called" << endl;

    down_size = down_image_length * down_image_width;
    up_size = up_image_length * up_image_width;

    PLASSERT( down_image_length > 0 );
    PLASSERT( down_image_width > 0 );
    PLASSERT( down_image_length * down_image_width == down_size );
    PLASSERT( up_image_length > 0 );
    PLASSERT( up_image_width > 0 );
    PLASSERT( up_image_length * up_image_width == up_size );
    PLASSERT( kernel_step1 > 0 );
    PLASSERT( kernel_step2 > 0 );

    kernel_length = down_image_length - kernel_step1 * (up_image_length-1);
    PLASSERT( kernel_length > 0 );
    kernel_width = down_image_width - kernel_step2 * (up_image_width-1);
    PLASSERT( kernel_width > 0 );
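
    // Worked example (illustrative): with a 28x28 down image, a 24x24 up
    // image and steps of 1,
    //     kernel_length = 28 - 1*(24-1) = 5,  kernel_width = 5,
    // i.e. the kernel gets whatever size makes a "valid" convolution of the
    // down image, with steps (kernel_step1, kernel_step2), produce exactly
    // the up image.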

    output_size = 0;
    bool needs_forget = false; // do we need to reinitialize the parameters?

    if( kernel.length() != kernel_length ||
        kernel.width() != kernel_width )
    {
        kernel.resize( kernel_length, kernel_width );
        needs_forget = true;
    }

    kernel_pos_stats.resize( kernel_length, kernel_width );
    kernel_neg_stats.resize( kernel_length, kernel_width );
    kernel_gradient.resize( kernel_length, kernel_width );

    if( momentum != 0. )
        kernel_inc.resize( kernel_length, kernel_width );

    if( needs_forget )
        forget();

    clearStats();
}

void RBMConv2DConnection::build()
{
    inherited::build();
    build_();
}


void RBMConv2DConnection::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(kernel, copies);
    deepCopyField(kernel_pos_stats, copies);
    deepCopyField(kernel_neg_stats, copies);
    deepCopyField(kernel_gradient, copies);
    deepCopyField(kernel_inc, copies);
    deepCopyField(down_image, copies);
    deepCopyField(up_image, copies);
    deepCopyField(down_image_gradient, copies);
    deepCopyField(up_image_gradient, copies);
}

void RBMConv2DConnection::accumulatePosStats( const Vec& down_values,
                                              const Vec& up_values )
{
    down_image = down_values.toMat( down_image_length, down_image_width );
    up_image = up_values.toMat( up_image_length, up_image_width );

    /*  for i=0 to up_image_length:
     *   for j=0 to up_image_width:
     *     for l=0 to kernel_length:
     *       for m=0 to kernel_width:
     *         kernel_pos_stats(l,m) +=
     *           down_image(step1*i+l,step2*j+m) * up_image(i,j)
     */
    convolve2Dbackprop( down_image, up_image, kernel_pos_stats,
                        kernel_step1, kernel_step2, true );

    pos_count++;
}

void RBMConv2DConnection::accumulateNegStats( const Vec& down_values,
                                              const Vec& up_values )
{
    down_image = down_values.toMat( down_image_length, down_image_width );
    up_image = up_values.toMat( up_image_length, up_image_width );
    /*  for i=0 to up_image_length:
     *   for j=0 to up_image_width:
     *     for l=0 to kernel_length:
     *       for m=0 to kernel_width:
     *         kernel_neg_stats(l,m) +=
     *           down_image(step1*i+l,step2*j+m) * up_image(i,j)
     */
    convolve2Dbackprop( down_image, up_image, kernel_neg_stats,
                        kernel_step1, kernel_step2, true );

    neg_count++;
}
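
// A minimal sketch of how these accumulators are typically driven during
// contrastive-divergence training (hypothetical caller code; `connection`,
// `v0`, `h0`, `v1`, `h1` are assumed to come from the RBM's Gibbs sampling
// and are not defined in this file):
//
//     connection->accumulatePosStats( v0, h0 ); // positive phase, <v h>_data
//     connection->accumulateNegStats( v1, h1 ); // negative phase, <v h>_model
//     connection->update();  // kernel += lr * (pos/pos_count - neg/neg_count)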

void RBMConv2DConnection::update()
{
    // updates parameters
    // kernel += learning_rate * (kernel_pos_stats/pos_count
    //                              - kernel_neg_stats/neg_count)
    real pos_factor = learning_rate / pos_count;
    real neg_factor = -learning_rate / neg_count;

    real* k_i = kernel.data();
    real* kps_i = kernel_pos_stats.data();
    real* kns_i = kernel_neg_stats.data();
    int k_mod = kernel.mod();
    int kps_mod = kernel_pos_stats.mod();
    int kns_mod = kernel_neg_stats.mod();

    if( momentum == 0. )
    {
        // no need to use kernel_inc
        for( int i=0 ; i<kernel_length ; i++, k_i+=k_mod,
                                         kps_i+=kps_mod, kns_i+=kns_mod )
            for( int j=0 ; j<kernel_width ; j++ )
                k_i[j] += pos_factor * kps_i[j] + neg_factor * kns_i[j];
    }
    else
    {
        // ensure that kernel_inc has the right size
        kernel_inc.resize( kernel_length, kernel_width );

        // The update rule becomes:
        // kernel_inc = momentum * kernel_inc
        //               + learning_rate * (kernel_pos_stats/pos_count
        //                                  - kernel_neg_stats/neg_count);
        // kernel += kernel_inc;
        real* kinc_i = kernel_inc.data();
        int kinc_mod = kernel_inc.mod();
        for( int i=0 ; i<kernel_length ; i++, k_i += k_mod, kps_i += kps_mod,
                                         kns_i += kns_mod, kinc_i += kinc_mod )
            for( int j=0 ; j<kernel_width ; j++ )
            {
                kinc_i[j] = momentum * kinc_i[j]
                    + pos_factor * kps_i[j] + neg_factor * kns_i[j];
                k_i[j] += kinc_i[j];
            }
    }

    clearStats();
}

// Instead of using the statistics, we assume that only one Markov chain was
// run, and we update the parameters from the first 4 values of the chain
void RBMConv2DConnection::update( const Vec& pos_down_values, // v_0
                                  const Vec& pos_up_values,   // h_0
                                  const Vec& neg_down_values, // v_1
                                  const Vec& neg_up_values )  // h_1
{
    PLASSERT( pos_up_values.length() == up_size );
    PLASSERT( neg_up_values.length() == up_size );
    PLASSERT( pos_down_values.length() == down_size );
    PLASSERT( neg_down_values.length() == down_size );

    /*  for i=0 to up_image_length:
     *   for j=0 to up_image_width:
     *     for l=0 to kernel_length:
     *       for m=0 to kernel_width:
     *         kernel(l,m) += learning_rate *
     *           ( pos_down_image(step1*i+l,step2*j+m) * pos_up_image(i,j)
     *             - neg_down_image(step1*i+l,step2*j+m) * neg_up_image(i,j) )
     */

    real* puv = pos_up_values.data();
    real* nuv = neg_up_values.data();
    real* pdv = pos_down_values.data();
    real* ndv = neg_down_values.data();
    int k_mod = kernel.mod();

    if( momentum == 0. )
    {
        for( int i=0; i<up_image_length; i++,
                                         puv+=up_image_width,
                                         nuv+=up_image_width,
                                         pdv+=kernel_step1*down_image_width,
                                         ndv+=kernel_step1*down_image_width )
        {
            // copies to iterate over columns
            real* pdv1 = pdv;
            real* ndv1 = ndv;
            for( int j=0; j<up_image_width; j++,
                                            pdv1+=kernel_step2,
                                            ndv1+=kernel_step2 )
            {
                real* k = kernel.data();
                real* pdv2 = pdv1; // copy to iterate over sub-rows
                real* ndv2 = ndv1;
                real puv_ij = puv[j];
                real nuv_ij = nuv[j];
                for( int l=0; l<kernel_length; l++, k+=k_mod,
                                               pdv2+=down_image_width,
                                               ndv2+=down_image_width )
                    for( int m=0; m<kernel_width; m++ )
                        k[m] += learning_rate *
                            (pdv2[m] * puv_ij - ndv2[m] * nuv_ij);
            }
        }
    }
    else
    {
        // ensure that kernel_inc has the right size
        kernel_inc.resize( kernel_length, kernel_width );
        kernel_inc *= momentum;

        int kinc_mod = kernel_inc.mod();
        for( int i=0; i<up_image_length; i++,
                                         puv+=up_image_width,
                                         nuv+=up_image_width,
                                         pdv+=kernel_step1*down_image_width,
                                         ndv+=kernel_step1*down_image_width )
        {
            // copies to iterate over columns
            real* pdv1 = pdv;
            real* ndv1 = ndv;
            for( int j=0; j<up_image_width; j++,
                                            pdv1+=kernel_step2,
                                            ndv1+=kernel_step2 )
            {
                real* kinc = kernel_inc.data();
                real* pdv2 = pdv1; // copy to iterate over sub-rows
                real* ndv2 = ndv1;
                real puv_ij = puv[j];
                real nuv_ij = nuv[j];
                for( int l=0; l<kernel_length; l++, kinc+=kinc_mod,
                                               pdv2+=down_image_width,
                                               ndv2+=down_image_width )
                    for( int m=0; m<kernel_width; m++ )
                        kinc[m] += pdv2[m] * puv_ij - ndv2[m] * nuv_ij;
            }
        }
        multiplyAcc( kernel, kernel_inc, learning_rate );
    }
}

void RBMConv2DConnection::update( const Mat& pos_down_values, // v_0
                                  const Mat& pos_up_values,   // h_0
                                  const Mat& neg_down_values, // v_1
                                  const Mat& neg_up_values )  // h_1
{
    PLASSERT( pos_up_values.width() == up_size );
    PLASSERT( neg_up_values.width() == up_size );
    PLASSERT( pos_down_values.width() == down_size );
    PLASSERT( neg_down_values.width() == down_size );

    int batch_size = pos_down_values.length();
    PLASSERT( pos_up_values.length() == batch_size );
    PLASSERT( neg_down_values.length() == batch_size );
    PLASSERT( neg_up_values.length() == batch_size );

    real norm_lr = learning_rate / batch_size;

    /*  for i=0 to up_image_length:
     *   for j=0 to up_image_width:
     *     for l=0 to kernel_length:
     *       for m=0 to kernel_width:
     *         kernel(l,m) += norm_lr *
     *           ( pos_down_image(step1*i+l,step2*j+m) * pos_up_image(i,j)
     *             - neg_down_image(step1*i+l,step2*j+m) * neg_up_image(i,j) )
     */

    if( momentum == 0. )
    {
        for( int b=0; b<batch_size; b++ )
        {
            real* puv = pos_up_values(b).data();
            real* nuv = neg_up_values(b).data();
            real* pdv = pos_down_values(b).data();
            real* ndv = neg_down_values(b).data();
            int k_mod = kernel.mod();

            for( int i=0; i<up_image_length;
                 i++,
                 puv+=up_image_width,
                 nuv+=up_image_width,
                 pdv+=kernel_step1*down_image_width,
                 ndv+=kernel_step1*down_image_width )
            {
                // copies to iterate over columns
                real* pdv1 = pdv;
                real* ndv1 = ndv;
                for( int j=0; j<up_image_width; j++,
                                                pdv1+=kernel_step2,
                                                ndv1+=kernel_step2 )
                {
                    real* k = kernel.data();
                    real* pdv2 = pdv1; // copy to iterate over sub-rows
                    real* ndv2 = ndv1;
                    real puv_ij = puv[j];
                    real nuv_ij = nuv[j];
                    for( int l=0; l<kernel_length; l++, k+=k_mod,
                                                   pdv2+=down_image_width,
                                                   ndv2+=down_image_width )
                        for( int m=0; m<kernel_width; m++ )
                            k[m] += norm_lr *
                                (pdv2[m] * puv_ij - ndv2[m] * nuv_ij);
                }
            }
        }
    }
    else
        PLCHECK_MSG(false,
                    "mini-batch and momentum don't work together yet");
}

void RBMConv2DConnection::clearStats()
{
    kernel_pos_stats.clear();
    kernel_neg_stats.clear();

    pos_count = 0;
    neg_count = 0;
}

void RBMConv2DConnection::computeProduct
    ( int start, int length, const Vec& activations, bool accumulate ) const
{
    // Unoptimized version: compute all the activations, then return the
    // requested subvector
    PLASSERT( activations.length() == length );
    if( going_up )
    {
        PLASSERT( start+length <= up_size );
        down_image = input_vec.toMat( down_image_length, down_image_width );

        // special cases:
        if( length == 1 )
        {
            // activation of the single up unit (i,j) pointed to by 'start'
            int i = start / up_image_width;
            int j = start % up_image_width;
            real act = 0;
            real* k = kernel.data();
            int k_mod = kernel.mod();
            real* di = down_image.data()
                        + kernel_step1*i*down_image_width
                        + kernel_step2*j;
            for( int l=0; l<kernel_length; l++, di+=down_image_width,
                                           k+=k_mod )
                for( int m=0; m<kernel_width; m++ )
                    act += di[m] * k[m];
            if( accumulate )
                activations[0] += act;
            else
                activations[0] = act;
        }
        else if( start == 0 && length == up_size )
        {
            up_image = activations.toMat( up_image_length, up_image_width );
            convolve2D( down_image, kernel, up_image,
                        kernel_step1, kernel_step2, accumulate );
        }
        else
        {
            up_image = Mat( up_image_length, up_image_width );
            convolve2D( down_image, kernel, up_image,
                        kernel_step1, kernel_step2, false );
            if( accumulate )
                activations += up_image.toVec().subVec( start, length );
            else
                activations << up_image.toVec().subVec( start, length );
        }
    }
    else
    {
        PLASSERT( start+length <= down_size );
        up_image = input_vec.toMat( up_image_length, up_image_width );

        // special cases
        if( start == 0 && length == down_size )
        {
            down_image = activations.toMat( down_image_length,
                                            down_image_width );
            backConvolve2D( down_image, kernel, up_image,
                            kernel_step1, kernel_step2, accumulate );
        }
        else
        {
            down_image = Mat( down_image_length, down_image_width );
            backConvolve2D( down_image, kernel, up_image,
                            kernel_step1, kernel_step2, false );
            if( accumulate )
                activations += down_image.toVec().subVec( start, length );
            else
                activations << down_image.toVec().subVec( start, length );
        }
    }
}
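
// Usage sketch (hypothetical caller code; relies on the input-setting
// interface inherited from RBMConnection, e.g. setAsDownInput(), which
// stores `input_vec` and sets `going_up`):
//
//     connection->setAsDownInput( visible );   // going_up becomes true
//     Vec act( connection->up_size );
//     connection->computeProduct( 0, connection->up_size, act, false );
//     // `act` now holds the up-layer activations, i.e. the "valid"
//     // convolution of `visible` with the kernel.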

void RBMConv2DConnection::computeProducts(int start, int length,
                                          Mat& activations,
                                          bool accumulate) const
{
    PLASSERT( activations.width() == length );
    int batch_size = inputs_mat.length();
    activations.resize( batch_size, length );
    if( going_up )
    {
        PLASSERT( start+length <= up_size );
        // usual case
        if( start == 0 && length == up_size )
            for( int k=0; k<batch_size; k++ )
            {
                up_image = activations(k)
                    .toMat(up_image_length, up_image_width);
                down_image = inputs_mat(k)
                    .toMat(down_image_length, down_image_width);

                convolve2D(down_image, kernel, up_image,
                           kernel_step1, kernel_step2, accumulate);
            }
        else
            PLCHECK_MSG(false,
                        "Unusual case of use (start!=0 or length!=up_size)\n"
                        "not implemented yet.");
    }
    else
    {
        PLASSERT( start+length <= down_size );
        // usual case
        if( start == 0 && length == down_size )
            for( int k=0; k<batch_size; k++ )
            {
                up_image = inputs_mat(k)
                    .toMat(up_image_length, up_image_width);
                down_image = activations(k)
                    .toMat(down_image_length, down_image_width);

                backConvolve2D(down_image, kernel, up_image,
                               kernel_step1, kernel_step2, accumulate);
            }
        else
            PLCHECK_MSG(false,
                        "Unusual case of use (start!=0 or length!=down_size)\n"
                        "not implemented yet.");
    }
}

void RBMConv2DConnection::bpropUpdate(const Vec& input, const Vec& output,
                                      Vec& input_gradient,
                                      const Vec& output_gradient,
                                      bool accumulate)
{
    PLASSERT( input.size() == down_size );
    PLASSERT( output.size() == up_size );
    PLASSERT( output_gradient.size() == up_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == down_size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
        input_gradient.resize( down_size );

    down_image = input.toMat( down_image_length, down_image_width );
    up_image = output.toMat( up_image_length, up_image_width );
    down_image_gradient = input_gradient.toMat( down_image_length,
                                                down_image_width );
    up_image_gradient = output_gradient.toMat( up_image_length,
                                               up_image_width );

    // update input_gradient and kernel_gradient
    convolve2Dbackprop( down_image, kernel,
                        up_image_gradient, down_image_gradient,
                        kernel_gradient,
                        kernel_step1, kernel_step2, accumulate );

    // kernel -= learning_rate * kernel_gradient
    multiplyAcc( kernel, kernel_gradient, -learning_rate );
}
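
// For reference, the kernel gradient accumulated by convolve2Dbackprop above
// follows the same pattern as the loop comments earlier in this file:
//
//     kernel_gradient(l,m) = sum_{i,j} down_image(step1*i+l, step2*j+m)
//                                      * up_image_gradient(i,j)
//
// while down_image_gradient is the corresponding back-convolution of
// up_image_gradient with the kernel.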

void RBMConv2DConnection::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                      Mat& input_gradients,
                                      const Mat& output_gradients,
                                      bool accumulate)
{
    PLASSERT( inputs.width() == down_size );
    PLASSERT( outputs.width() == up_size );
    PLASSERT( output_gradients.width() == up_size );

    int batch_size = inputs.length();
    PLASSERT( outputs.length() == batch_size );
    PLASSERT( output_gradients.length() == batch_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == down_size &&
                      input_gradients.length() == batch_size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradients.resize(batch_size, down_size);
        input_gradients.clear();
    }

    kernel_gradient.clear();
    for( int k=0; k<batch_size; k++ )
    {
        down_image = inputs(k).toMat( down_image_length, down_image_width );
        up_image = outputs(k).toMat( up_image_length, up_image_width );
        down_image_gradient = input_gradients(k)
            .toMat( down_image_length, down_image_width );
        up_image_gradient = output_gradients(k)
            .toMat( up_image_length, up_image_width );

        // update input_gradient and kernel_gradient
        convolve2Dbackprop( down_image, kernel,
                            up_image_gradient, down_image_gradient,
                            kernel_gradient,
                            kernel_step1, kernel_step2, true );
    }

    // kernel -= learning_rate/n * kernel_gradient
    multiplyAcc( kernel, kernel_gradient, -learning_rate/batch_size );
}

////////////////////
// bpropAccUpdate //
////////////////////
void RBMConv2DConnection::bpropAccUpdate(const TVec<Mat*>& ports_value,
                                         const TVec<Mat*>& ports_gradient)
{
    PLASSERT( ports_value.length() == nPorts()
              && ports_gradient.length() == nPorts() );

    Mat* down = ports_value[0];
    Mat* up = ports_value[1];
    Mat* down_grad = ports_gradient[0];
    Mat* up_grad = ports_gradient[1];

    PLASSERT( down && !down->isEmpty() );
    PLASSERT( up && !up->isEmpty() );

    int batch_size = down->length();
    PLASSERT( up->length() == batch_size );

    // If we have up_grad
    if( up_grad && !up_grad->isEmpty() )
    {
        // down_grad should not be provided
        PLASSERT( !down_grad || down_grad->isEmpty() );
        PLASSERT( up_grad->length() == batch_size );
        PLASSERT( up_grad->width() == up_size );

        // If we want down_grad
        bool compute_down_grad = false;
        if( down_grad && down_grad->isEmpty() )
        {
            PLASSERT( down_grad->width() == down_size );
            down_grad->resize(batch_size, down_size);
            compute_down_grad = true;
        }

        kernel_gradient.clear();
        for (int k=0; k<batch_size; k++)
        {
            down_image = (*down)(k).toMat(down_image_length, down_image_width);
            up_image = (*up)(k).toMat(up_image_length, up_image_width);
            up_image_gradient = (*up_grad)(k)
                .toMat(up_image_length, up_image_width);

            if( compute_down_grad )
            {
                down_image_gradient = (*down_grad)(k)
                    .toMat(down_image_length, down_image_width);
                convolve2Dbackprop(down_image, kernel,
                                   up_image_gradient, down_image_gradient,
                                   kernel_gradient,
                                   kernel_step1, kernel_step2, true);
            }
            else
                convolve2Dbackprop(down_image, up_image_gradient,
                                   kernel_gradient,
                                   kernel_step1, kernel_step2, true);
        }
        // kernel -= learning_rate/n * kernel_gradient
        multiplyAcc(kernel, kernel_gradient, -learning_rate/batch_size);
    }
    else if( down_grad && !down_grad->isEmpty() )
    {
        PLASSERT( down_grad->length() == batch_size );
        PLASSERT( down_grad->width() == down_size );

        // If we want up_grad
        bool compute_up_grad = false;
        if( up_grad && up_grad->isEmpty() )
        {
            PLASSERT( up_grad->width() == up_size );
            up_grad->resize(batch_size, up_size);
            compute_up_grad = true;
        }

        kernel_gradient.clear();
        for (int k=0; k<batch_size; k++)
        {
            down_image = (*down)(k).toMat(down_image_length, down_image_width);
            up_image = (*up)(k).toMat(up_image_length, up_image_width);
            down_image_gradient = (*down_grad)(k)
                .toMat(down_image_length, down_image_width);

            if( compute_up_grad )
            {
                up_image_gradient = (*up_grad)(k)
                    .toMat(up_image_length, up_image_width);
                backConvolve2Dbackprop(kernel, up_image, up_image_gradient,
                                       down_image_gradient, kernel_gradient,
                                       kernel_step1, kernel_step2, true);
            }
            else
                backConvolve2Dbackprop(up_image, down_image_gradient,
                                       kernel_gradient,
                                       kernel_step1, kernel_step2, true);
        }
        // kernel -= learning_rate/n * kernel_gradient
        multiplyAcc(kernel, kernel_gradient, -learning_rate/batch_size);
    }
    else
        PLCHECK_MSG( false,
                     "Unknown port configuration" );
}
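
// Note on the port convention used above: ports_value[0]/ports_gradient[0]
// refer to the "down" port and ports_value[1]/ports_gradient[1] to the "up"
// port. Exactly one of the two gradient ports must be provided full; the
// other one, if present and empty, is resized and computed here.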


void RBMConv2DConnection::forget()
{
    clearStats();
    if( initialization_method == "zero" )
        kernel.clear();
    else
    {
        if( !random_gen )
        {
            PLWARNING( "RBMConv2DConnection: cannot forget() without"
                       " random_gen" );
            return;
        }

        real d = 1. / max( kernel_length, kernel_width );
        if( initialization_method == "uniform_sqrt" )
            d = sqrt( d );

        random_gen->fill_random_uniform( kernel, -d, d );
    }
}
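
// Example of the ranges produced above (illustrative): for a 5x5 kernel,
// d = 1/5, so "uniform" initialization draws each kernel entry from
// U(-0.2, 0.2), while "uniform_sqrt" widens this to U(-sqrt(0.2), sqrt(0.2)).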


/* THIS METHOD IS OPTIONAL
void RBMConv2DConnection::finalize()
{
}
*/

int RBMConv2DConnection::nParameters() const
{
    return kernel.size();
}

Vec RBMConv2DConnection::makeParametersPointHere(const Vec& global_parameters)
{
    int n = kernel.size();
    int m = global_parameters.size();
    if (m<n)
        PLERROR("RBMConv2DConnection::makeParametersPointHere: argument has "
                "length %d, should be at least nParameters()=%d", m, n);
    real* p = global_parameters.data();
    kernel.makeSharedValue(p,n);
    return global_parameters.subVec(n,m-n);
}
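
// Usage sketch (hypothetical caller code): to make a set of modules share one
// flat parameter vector,
//
//     Vec all_params( connection->nParameters() );
//     Vec rest = connection->makeParametersPointHere( all_params );
//     // `kernel` now aliases the start of all_params; `rest` is empty here,
//     // and in a longer chain would be passed to the next module.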


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :