// -*- C++ -*-

// RBMConv2DLLParameters.cc
//
// Copyright (C) 2006 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#define PL_LOG_MODULE_NAME "RBMConv2DLLParameters"
#include <plearn/io/pl_log.h>

#include "RBMConv2DLLParameters.h"
#include <plearn/math/TMat_maths.h>
#include <plearn/math/convolutions.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMConv2DLLParameters,
    "Filter between two linear layers of a 2D convolutional RBM",
    "");

RBMConv2DLLParameters::RBMConv2DLLParameters( real the_learning_rate ) :
    inherited(the_learning_rate),
    momentum(0.)
{
}

RBMConv2DLLParameters::RBMConv2DLLParameters( string down_types,
                                              string up_types,
                                              real the_learning_rate ) :
    inherited( down_types, up_types, the_learning_rate ),
    momentum(0.)
{
    // We're not sure inherited::build() has been called
    build();
}
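// Overview of the geometry used throughout this class: the "down" (visible)
// layer is viewed as a down_image_length x down_image_width image, and the
// "up" (hidden) layer as an up_image_length x up_image_width image. Going up
// convolves the down image with `kernel`, using strides kernel_step1
// (vertical) and kernel_step2 (horizontal); going down applies the
// corresponding back-convolution (the transposed operation) with the same
// kernel.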
void RBMConv2DLLParameters::declareOptions(OptionList& ol)
{
    declareOption(ol, "momentum", &RBMConv2DLLParameters::momentum,
                  OptionBase::buildoption,
                  "Momentum factor (should be between 0 and 1)");

    declareOption(ol, "down_image_length",
                  &RBMConv2DLLParameters::down_image_length,
                  OptionBase::buildoption,
                  "Length of the down image");

    declareOption(ol, "down_image_width",
                  &RBMConv2DLLParameters::down_image_width,
                  OptionBase::buildoption,
                  "Width of the down image");

    declareOption(ol, "up_image_length",
                  &RBMConv2DLLParameters::up_image_length,
                  OptionBase::buildoption,
                  "Length of the up image");

    declareOption(ol, "up_image_width",
                  &RBMConv2DLLParameters::up_image_width,
                  OptionBase::buildoption,
                  "Width of the up image");

    declareOption(ol, "kernel_step1", &RBMConv2DLLParameters::kernel_step1,
                  OptionBase::buildoption,
                  "\"Vertical\" step of the convolution");

    declareOption(ol, "kernel_step2", &RBMConv2DLLParameters::kernel_step2,
                  OptionBase::buildoption,
                  "\"Horizontal\" step of the convolution");

    declareOption(ol, "kernel", &RBMConv2DLLParameters::kernel,
                  OptionBase::learntoption,
                  "Matrix containing the convolution kernel (filter)");

    declareOption(ol, "up_units_bias",
                  &RBMConv2DLLParameters::up_units_bias,
                  OptionBase::learntoption,
                  "Element i contains the bias of up unit i");

    declareOption(ol, "down_units_bias",
                  &RBMConv2DLLParameters::down_units_bias,
                  OptionBase::learntoption,
                  "Element i contains the bias of down unit i");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMConv2DLLParameters::build_()
{
    MODULE_LOG << "build_() called" << endl;
    if( up_layer_size == 0 || down_layer_size == 0 )
    {
        MODULE_LOG << "build_() aborted" << endl;
        return;
    }

    PLASSERT( down_image_length > 0 );
    PLASSERT( down_image_width > 0 );
    PLASSERT( down_image_length * down_image_width == down_layer_size );
    PLASSERT( up_image_length > 0 );
    PLASSERT( up_image_width > 0 );
    PLASSERT( up_image_length * up_image_width == up_layer_size );
    PLASSERT( kernel_step1 > 0 );
    PLASSERT( kernel_step2 > 0 );

    kernel_length = down_image_length - kernel_step1 * (up_image_length-1);
    PLASSERT( kernel_length > 0 );
    kernel_width = down_image_width - kernel_step2 * (up_image_width-1);
    PLASSERT( kernel_width > 0 );
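    // Worked example of the size relations above (illustrative numbers, not
    // defaults): with down_image_length = 28, up_image_length = 13 and
    // kernel_step1 = 2, the kernel spans
    //     kernel_length = 28 - 2*(13-1) = 4
    // rows; conversely, up_image_length = (28 - 4)/2 + 1 = 13, as for a
    // strided "valid" convolution.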

    output_size = 0;
    bool needs_forget = false; // do we need to reinitialize the parameters?

    if( kernel.length() != kernel_length ||
        kernel.width() != kernel_width )
    {
        kernel.resize( kernel_length, kernel_width );
        needs_forget = true;
    }

    kernel_pos_stats.resize( kernel_length, kernel_width );
    kernel_neg_stats.resize( kernel_length, kernel_width );
    kernel_gradient.resize( kernel_length, kernel_width );

    down_units_bias.resize( down_layer_size );
    down_units_bias_pos_stats.resize( down_layer_size );
    down_units_bias_neg_stats.resize( down_layer_size );
    for( int i=0 ; i<down_layer_size ; i++ )
    {
        char dut_i = down_units_types[i];
        if( dut_i != 'l' ) // not a linear activation unit
            PLERROR( "RBMConv2DLLParameters::build_() - value '%c' for"
                     " down_units_types[%d]\n"
                     "should be 'l'.\n",
                     dut_i, i );
    }

    up_units_bias.resize( up_layer_size );
    up_units_bias_pos_stats.resize( up_layer_size );
    up_units_bias_neg_stats.resize( up_layer_size );
    for( int i=0 ; i<up_layer_size ; i++ )
    {
        char uut_i = up_units_types[i];
        if( uut_i != 'l' ) // not a linear activation unit
            PLERROR( "RBMConv2DLLParameters::build_() - value '%c' for"
                     " up_units_types[%d]\n"
                     "should be 'l'.\n",
                     uut_i, i );
    }

    if( momentum != 0. )
    {
        kernel_inc.resize( kernel_length, kernel_width );
        down_units_bias_inc.resize( down_layer_size );
        up_units_bias_inc.resize( up_layer_size );
    }

    if( needs_forget )
        forget();

    clearStats();
}

void RBMConv2DLLParameters::build()
{
    inherited::build();
    build_();
}


void RBMConv2DLLParameters::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(kernel, copies);
    deepCopyField(up_units_bias, copies);
    deepCopyField(down_units_bias, copies);
    deepCopyField(kernel_pos_stats, copies);
    deepCopyField(kernel_neg_stats, copies);
    deepCopyField(kernel_gradient, copies);
    deepCopyField(up_units_bias_pos_stats, copies);
    deepCopyField(up_units_bias_neg_stats, copies);
    deepCopyField(down_units_bias_pos_stats, copies);
    deepCopyField(down_units_bias_neg_stats, copies);
    deepCopyField(kernel_inc, copies);
    deepCopyField(up_units_bias_inc, copies);
    deepCopyField(down_units_bias_inc, copies);
    deepCopyField(down_image, copies);
    deepCopyField(up_image, copies);
    deepCopyField(down_image_gradient, copies);
    deepCopyField(up_image_gradient, copies);
}

void RBMConv2DLLParameters::accumulatePosStats( const Vec& down_values,
                                                const Vec& up_values )
{
    down_image = down_values.toMat( down_image_length, down_image_width );
    up_image = up_values.toMat( up_image_length, up_image_width );

    /* for i=0 to up_image_length:
     *  for j=0 to up_image_width:
     *   for l=0 to kernel_length:
     *    for m=0 to kernel_width:
     *     kernel_pos_stats(l,m) +=
     *       down_image(step1*i+l,step2*j+m) * up_image(i,j)
     */
    convolve2Dbackprop( down_image, up_image, kernel_pos_stats,
                        kernel_step1, kernel_step2, true );

    down_units_bias_pos_stats += down_values;
    up_units_bias_pos_stats += up_values;

    pos_count++;
}

void RBMConv2DLLParameters::accumulateNegStats( const Vec& down_values,
                                                const Vec& up_values )
{
    down_image = down_values.toMat( down_image_length, down_image_width );
    up_image = up_values.toMat( up_image_length, up_image_width );

    /* for i=0 to up_image_length:
     *  for j=0 to up_image_width:
     *   for l=0 to kernel_length:
     *    for m=0 to kernel_width:
     *     kernel_neg_stats(l,m) +=
     *       down_image(step1*i+l,step2*j+m) * up_image(i,j)
     */
    convolve2Dbackprop( down_image, up_image, kernel_neg_stats,
                        kernel_step1, kernel_step2, true );

    down_units_bias_neg_stats += down_values;
    up_units_bias_neg_stats += up_values;

    neg_count++;
}
void RBMConv2DLLParameters::update()
{
    // updates parameters
    // kernel -= learning_rate * (kernel_pos_stats/pos_count
    //                            - kernel_neg_stats/neg_count)
    real pos_factor = -learning_rate / pos_count;
    real neg_factor = learning_rate / neg_count;

    real* k_i = kernel.data();
    real* kps_i = kernel_pos_stats.data();
    real* kns_i = kernel_neg_stats.data();
    int k_mod = kernel.mod();
    int kps_mod = kernel_pos_stats.mod();
    int kns_mod = kernel_neg_stats.mod();

    if( momentum == 0. )
    {
        // no need to use weights_inc
        for( int i=0 ; i<kernel_length ; i++, k_i+=k_mod,
                                         kps_i+=kps_mod, kns_i+=kns_mod )
            for( int j=0 ; j<kernel_width ; j++ )
                k_i[j] += pos_factor * kps_i[j] + neg_factor * kns_i[j];
    }
    else
    {
        // ensure that weights_inc has the right size
        kernel_inc.resize( kernel_length, kernel_width );

        // The update rule becomes:
        // kernel_inc = momentum * kernel_inc
        //              - learning_rate * (kernel_pos_stats/pos_count
        //                                 - kernel_neg_stats/neg_count);
        // kernel += kernel_inc;
        real* kinc_i = kernel_inc.data();
        int kinc_mod = kernel_inc.mod();
        for( int i=0 ; i<kernel_length ; i++, k_i += k_mod, kps_i += kps_mod,
                                         kns_i += kns_mod, kinc_i += kinc_mod )
            for( int j=0 ; j<kernel_width ; j++ )
            {
                kinc_i[j] = momentum * kinc_i[j]
                    + pos_factor * kps_i[j] + neg_factor * kns_i[j];
                k_i[j] += kinc_i[j];
            }
    }

    // down_units_bias -= learning_rate * (down_units_bias_pos_stats/pos_count
    //                                     -down_units_bias_neg_stats/neg_count)
    real* dub = down_units_bias.data();
    real* dubps = down_units_bias_pos_stats.data();
    real* dubns = down_units_bias_neg_stats.data();

    if( momentum == 0. )
    {
        // no need to use down_units_bias_inc
        for( int i=0 ; i<down_layer_size ; i++ )
            dub[i] += pos_factor * dubps[i] + neg_factor * dubns[i];
    }
    else
    {
        // ensure that down_units_bias_inc has the right size
        down_units_bias_inc.resize( down_layer_size );

        // The update rule becomes:
        // down_units_bias_inc =
        //     momentum * down_units_bias_inc
        //     - learning_rate * (down_units_bias_pos_stats/pos_count
        //                        -down_units_bias_neg_stats/neg_count);
        // down_units_bias += down_units_bias_inc;
        real* dubinc = down_units_bias_inc.data();
        for( int i=0 ; i<down_layer_size ; i++ )
        {
            dubinc[i] = momentum * dubinc[i]
                + pos_factor * dubps[i] + neg_factor * dubns[i];
            dub[i] += dubinc[i];
        }
    }

    // up_units_bias -= learning_rate * (up_units_bias_pos_stats/pos_count
    //                                   -up_units_bias_neg_stats/neg_count)
    real* uub = up_units_bias.data();
    real* uubps = up_units_bias_pos_stats.data();
    real* uubns = up_units_bias_neg_stats.data();
    if( momentum == 0. )
    {
        // no need to use up_units_bias_inc
        for( int i=0 ; i<up_layer_size ; i++ )
            uub[i] += pos_factor * uubps[i] + neg_factor * uubns[i];
    }
    else
    {
        // ensure that up_units_bias_inc has the right size
        up_units_bias_inc.resize( up_layer_size );

        // The update rule becomes:
        // up_units_bias_inc =
        //     momentum * up_units_bias_inc
        //     - learning_rate * (up_units_bias_pos_stats/pos_count
        //                        -up_units_bias_neg_stats/neg_count);
        // up_units_bias += up_units_bias_inc;
        real* uubinc = up_units_bias_inc.data();
        for( int i=0 ; i<up_layer_size ; i++ )
        {
            uubinc[i] = momentum * uubinc[i]
                + pos_factor * uubps[i] + neg_factor * uubns[i];
            uub[i] += uubinc[i];
        }
    }

    clearStats();
}
// Instead of using the accumulated statistics, we assume that a single
// Markov chain was run, and we update the parameters from the first four
// values of the chain
void RBMConv2DLLParameters::update( const Vec& pos_down_values, // v_0
                                    const Vec& pos_up_values,   // h_0
                                    const Vec& neg_down_values, // v_1
                                    const Vec& neg_up_values )  // h_1
{
    PLASSERT( pos_up_values.length() == up_layer_size );
    PLASSERT( neg_up_values.length() == up_layer_size );
    PLASSERT( pos_down_values.length() == down_layer_size );
    PLASSERT( neg_down_values.length() == down_layer_size );

    /* for i=0 to up_image_length:
     *  for j=0 to up_image_width:
     *   for l=0 to kernel_length:
     *    for m=0 to kernel_width:
     *     kernel(l,m) -= learning_rate *
     *       ( pos_down_image(step1*i+l,step2*j+m) * pos_up_image(i,j)
     *         - neg_down_image(step1*i+l,step2*j+m) * neg_up_image(i,j) )
     */

    real* puv = pos_up_values.data();
    real* nuv = neg_up_values.data();
    real* pdv = pos_down_values.data();
    real* ndv = neg_down_values.data();
    int k_mod = kernel.mod();

    if( momentum == 0. )
    {
        for( int i=0; i<up_image_length; i++,
                                         puv+=up_image_width,
                                         nuv+=up_image_width,
                                         pdv+=kernel_step1*down_image_width,
                                         ndv+=kernel_step1*down_image_width )
        {
            // copies to iterate over columns
            real* pdv1 = pdv;
            real* ndv1 = ndv;
            for( int j=0; j<up_image_width; j++,
                                            pdv1+=kernel_step2,
                                            ndv1+=kernel_step2 )
            {
                real* k = kernel.data();
                real* pdv2 = pdv1; // copy to iterate over sub-rows
                real* ndv2 = ndv1;
                real puv_ij = puv[j];
                real nuv_ij = nuv[j];
                for( int l=0; l<kernel_length; l++, k+=k_mod,
                                               pdv2+=down_image_width,
                                               ndv2+=down_image_width )
                    for( int m=0; m<kernel_width; m++ )
                        k[m] += learning_rate *
                            (ndv2[m] * nuv_ij - pdv2[m] * puv_ij);
            }
        }
    }
    else
    {
        // ensure that weights_inc has the right size
        kernel_inc.resize( kernel_length, kernel_width );
        kernel_inc *= momentum;

        // same loops as above, over the up-image positions, but accumulating
        // into kernel_inc instead of updating kernel directly
        int kinc_mod = kernel_inc.mod();
        for( int i=0; i<up_image_length; i++,
                                         puv+=up_image_width,
                                         nuv+=up_image_width,
                                         pdv+=kernel_step1*down_image_width,
                                         ndv+=kernel_step1*down_image_width )
        {
            // copies to iterate over columns
            real* pdv1 = pdv;
            real* ndv1 = ndv;
            for( int j=0; j<up_image_width; j++,
                                            pdv1+=kernel_step2,
                                            ndv1+=kernel_step2 )
            {
                real* kinc = kernel_inc.data();
                real* pdv2 = pdv1; // copy to iterate over sub-rows
                real* ndv2 = ndv1;
                real puv_ij = puv[j];
                real nuv_ij = nuv[j];
                for( int l=0; l<kernel_length; l++, kinc+=kinc_mod,
                                               pdv2+=down_image_width,
                                               ndv2+=down_image_width )
                    for( int m=0; m<kernel_width; m++ )
                        kinc[m] += ndv2[m] * nuv_ij - pdv2[m] * puv_ij;
            }
        }
        multiplyAcc( kernel, kernel_inc, learning_rate );
    }

    // down_units_bias -= learning_rate * ( v_0 - v_1 )

    real* dub = down_units_bias.data();
    // pdv and ndv didn't change since last time
    // real* pdv = pos_down_values.data();
    // real* ndv = neg_down_values.data();

    if( momentum == 0. )
    {
        // no need to use down_units_bias_inc
        for( int j=0 ; j<down_layer_size ; j++ )
            dub[j] += learning_rate * ( ndv[j] - pdv[j] );
    }
    else
    {
        // ensure that down_units_bias_inc has the right size
        down_units_bias_inc.resize( down_layer_size );

        // The update rule becomes:
        // down_units_bias_inc = momentum * down_units_bias_inc
        //                       - learning_rate * ( v_0 - v_1 );
        // down_units_bias += down_units_bias_inc;

        real* dubinc = down_units_bias_inc.data();
        for( int j=0 ; j<down_layer_size ; j++ )
        {
            dubinc[j] = momentum * dubinc[j]
                + learning_rate * ( ndv[j] - pdv[j] );
            dub[j] += dubinc[j];
        }
    }

    // up_units_bias -= learning_rate * ( h_0 - h_1 )
    real* uub = up_units_bias.data();
    puv = pos_up_values.data();
    nuv = neg_up_values.data();

    if( momentum == 0. )
    {
        // no need to use up_units_bias_inc
        for( int i=0 ; i<up_layer_size ; i++ )
            uub[i] += learning_rate * ( nuv[i] - puv[i] );
    }
    else
    {
        // ensure that up_units_bias_inc has the right size
        up_units_bias_inc.resize( up_layer_size );

        // The update rule becomes:
        // up_units_bias_inc = momentum * up_units_bias_inc
        //                     - learning_rate * ( h_0 - h_1 );
        // up_units_bias += up_units_bias_inc;
        real* uubinc = up_units_bias_inc.data();
        for( int i=0 ; i<up_layer_size ; i++ )
        {
            uubinc[i] = momentum * uubinc[i]
                + learning_rate * ( nuv[i] - puv[i] );
            uub[i] += uubinc[i];
        }
    }
}

void RBMConv2DLLParameters::clearStats()
{
    kernel_pos_stats.clear();
    kernel_neg_stats.clear();

    down_units_bias_pos_stats.clear();
    down_units_bias_neg_stats.clear();

    up_units_bias_pos_stats.clear();
    up_units_bias_neg_stats.clear();

    pos_count = 0;
    neg_count = 0;
}
void RBMConv2DLLParameters::computeUnitActivations
    ( int start, int length, const Vec& activations ) const
{
    // Unoptimized way, that computes all the activations and returns a subvec
    PLASSERT( activations.length() == length );
    if( going_up )
    {
        PLASSERT( start+length <= up_layer_size );
        down_image = input_vec.toMat( down_image_length, down_image_width );

        // special cases:
        if( length == 1 )
        {
            // compute only the activation of up unit 'start', located at row
            // start / up_image_width and column start % up_image_width of
            // the up image
            real act = 0;
            int k_mod = kernel.mod();
            real* k = kernel.data();
            real* di = down_image.data()
                + kernel_step1*(start / up_image_width)*down_image_width
                + kernel_step2*(start % up_image_width);
            for( int l=0; l<kernel_length; l++, k+=k_mod,
                                           di+=down_image_width )
                for( int m=0; m<kernel_width; m++ )
                    act += di[m] * k[m];
            activations[0] = act;
        }
        else if( start == 0 && length == up_layer_size )
        {
            up_image = activations.toMat( up_image_length, up_image_width );
            convolve2D( down_image, kernel, up_image,
                        kernel_step1, kernel_step2, false );
        }
        else
        {
            up_image = Mat( up_image_length, up_image_width );
            convolve2D( down_image, kernel, up_image,
                        kernel_step1, kernel_step2, false );
            activations << up_image.toVec().subVec( start, length );
        }
        activations += up_units_bias.subVec(start, length);
    }
    else
    {
        PLASSERT( start+length <= down_layer_size );
        up_image = input_vec.toMat( up_image_length, up_image_width );

        // special cases
        if( start == 0 && length == down_layer_size )
        {
            down_image = activations.toMat( down_image_length,
                                            down_image_width );
            backConvolve2D( down_image, kernel, up_image,
                            kernel_step1, kernel_step2, false );
        }
        else
        {
            down_image = Mat( down_image_length, down_image_width );
            backConvolve2D( down_image, kernel, up_image,
                            kernel_step1, kernel_step2, false );
            activations << down_image.toVec().subVec( start, length );
        }
        activations += down_units_bias.subVec(start, length);
    }
}

void RBMConv2DLLParameters::bpropUpdate(const Vec& input, const Vec& output,
                                        Vec& input_gradient,
                                        const Vec& output_gradient)
{
    PLASSERT( input.size() == down_layer_size );
    PLASSERT( output.size() == up_layer_size );
    PLASSERT( output_gradient.size() == up_layer_size );
    input_gradient.resize( down_layer_size );

    down_image = input.toMat( down_image_length, down_image_width );
    up_image = output.toMat( up_image_length, up_image_width );
    down_image_gradient = input_gradient.toMat( down_image_length,
                                                down_image_width );
    up_image_gradient = output_gradient.toMat( up_image_length,
                                               up_image_width );

    // update input_gradient and kernel_gradient
    convolve2Dbackprop( down_image, kernel,
                        up_image_gradient, down_image_gradient,
                        kernel_gradient,
                        kernel_step1, kernel_step2, false );

    // kernel -= learning_rate * kernel_gradient
    multiplyAcc( kernel, kernel_gradient, -learning_rate );

    // (up) bias -= learning_rate * output_gradient
    multiplyAcc( up_units_bias, output_gradient, -learning_rate );
}
void RBMConv2DLLParameters::forget()
{
    if( initialization_method == "zero" )
        kernel.clear();
    else
    {
        if( !random_gen )
            random_gen = new PRandom();

        real d = 1. / max( down_layer_size, up_layer_size );
        if( initialization_method == "uniform_sqrt" )
            d = sqrt( d );

        random_gen->fill_random_uniform( kernel, -d, d );
    }

    down_units_bias.clear();
    up_units_bias.clear();

    clearStats();
}


/* THIS METHOD IS OPTIONAL
void RBMConv2DLLParameters::finalize()
{
}
*/

int RBMConv2DLLParameters::nParameters(bool share_up_params,
                                       bool share_down_params) const
{
    return kernel.size() + (share_up_params?up_units_bias.size():0) +
        (share_down_params?down_units_bias.size():0);
}

Vec RBMConv2DLLParameters::makeParametersPointHere(
    const Vec& global_parameters, bool share_up_params, bool share_down_params)
{
    int n1 = kernel.size();
    int n2 = up_units_bias.size();
    int n3 = down_units_bias.size();
    // n should be equal to nParameters()
    int n = n1 + (share_up_params?n2:0) + (share_down_params?n3:0);
    int m = global_parameters.size();
    if( m < n )
        PLERROR( "RBMConv2DLLParameters::makeParametersPointHere: argument"
                 " has length %d, but should be at least nParameters()=%d",
                 m, n );
    real* p = global_parameters.data();
    kernel.makeSharedValue(p, n1);
    p += n1;
    if( share_up_params )
    {
        up_units_bias.makeSharedValue(p, n2);
        p += n2;
    }
    if( share_down_params )
        down_units_bias.makeSharedValue(p, n3);
    return global_parameters.subVec(n, m-n);
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :