// -*- C++ -*-

// RBMMixedLayer.cc
//
// Copyright (C) 2006 Pascal Lamblin & Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin & Dan Popovici

/*! \file RBMMixedLayer.cc */

#include "RBMMixedLayer.h"
#include <plearn/math/TMat_maths.h>
#include "RBMConnection.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMMixedLayer,
    "Layer in an RBM, concatenating other sub-layers",
    "");

RBMMixedLayer::RBMMixedLayer()
{
}

RBMMixedLayer::RBMMixedLayer( TVec< PP<RBMLayer> > the_sub_layers ) :
    sub_layers( the_sub_layers )
{
    build();
}

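// Construction sketch (hypothetical caller code, not taken from this file:
// the concrete layer classes, the option-setting style and the sizes are
// illustrative assumptions):
//
//     TVec< PP<RBMLayer> > subs( 2 );
//     subs[0] = new RBMBinomialLayer();     // e.g. 10 binary units
//     subs[0]->size = 10;
//     subs[0]->build();
//     subs[1] = new RBMMultinomialLayer();  // e.g. one 5-way softmax group
//     subs[1]->size = 5;
//     subs[1]->build();
//     PP<RBMMixedLayer> mixed = new RBMMixedLayer( subs );
//     // mixed->size == 15; units 0..9 belong to subs[0], 10..14 to subs[1]
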
/////////////////////
// setLearningRate //
/////////////////////
void RBMMixedLayer::setLearningRate( real the_learning_rate )
{
    inherited::setLearningRate( the_learning_rate );

    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->setLearningRate( the_learning_rate );
}

/////////////////
// setMomentum //
/////////////////
void RBMMixedLayer::setMomentum( real the_momentum )
{
    inherited::setMomentum( the_momentum );

    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->setMomentum( the_momentum );
}

//////////////////
// setBatchSize //
//////////////////
void RBMMixedLayer::setBatchSize( int the_batch_size )
{
    inherited::setBatchSize( the_batch_size );
    for( int i = 0; i < n_layers; i++ )
        sub_layers[i]->setBatchSize( the_batch_size );
}

////////////////////
// setExpectation //
////////////////////
void RBMMixedLayer::setExpectation(const Vec& the_expectation)
{
    expectation << the_expectation;
    expectation_is_up_to_date = true;
    for( int i = 0; i < n_layers; i++ )
        sub_layers[i]->expectation_is_up_to_date = true;
}

/////////////////////////
// setExpectationByRef //
/////////////////////////
void RBMMixedLayer::setExpectationByRef(const Vec& the_expectation)
{
    expectation = the_expectation;
    expectation_is_up_to_date = true;

    // Rearrange pointers
    for( int i = 0; i < n_layers; i++ )
    {
        int init_pos = init_positions[i];
        PP<RBMLayer> layer = sub_layers[i];
        int layer_size = layer->size;

        layer->setExpectationByRef( expectation.subVec(init_pos, layer_size) );
    }
}

/////////////////////
// setExpectations //
/////////////////////
void RBMMixedLayer::setExpectations(const Mat& the_expectations)
{
    batch_size = the_expectations.length();
    setBatchSize( batch_size );
    expectations << the_expectations;
    expectations_are_up_to_date = true;
    for( int i = 0; i < n_layers; i++ )
        sub_layers[i]->expectations_are_up_to_date = true;
}

//////////////////////////
// setExpectationsByRef //
//////////////////////////
void RBMMixedLayer::setExpectationsByRef(const Mat& the_expectations)
{
    batch_size = the_expectations.length();
    setBatchSize( batch_size );
    expectations = the_expectations;
    expectations_are_up_to_date = true;

    // Rearrange pointers
    for( int i = 0; i < n_layers; i++ )
    {
        int init_pos = init_positions[i];
        PP<RBMLayer> layer = sub_layers[i];
        int layer_size = layer->size;

        layer->setExpectationsByRef(expectations.subMatColumns(init_pos,
                                                               layer_size));
    }
}

///////////////////////
// getUnitActivation //
///////////////////////
void RBMMixedLayer::getUnitActivation( int i, PP<RBMConnection> rbmc,
                                       int offset )
{
    inherited::getUnitActivation( i, rbmc, offset );

    int j = layer_of_unit[i];
    sub_layers[j]->expectation_is_up_to_date = false;
}

///////////////////////
// getAllActivations //
///////////////////////
void RBMMixedLayer::getAllActivations( PP<RBMConnection> rbmc, int offset,
                                       bool minibatch )
{
    inherited::getAllActivations( rbmc, offset, minibatch );
    for( int i=0 ; i<n_layers ; i++ )
    {
        if( minibatch )
            sub_layers[i]->expectations_are_up_to_date = false;
        else
            sub_layers[i]->expectation_is_up_to_date = false;
    }
}

void RBMMixedLayer::expectation_is_not_up_to_date()
{
    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->expectation_is_not_up_to_date();

    expectation_is_up_to_date = false;
}

////////////////////
// generateSample //
////////////////////
void RBMMixedLayer::generateSample()
{
    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->generateSample();
}

/////////////////////
// generateSamples //
/////////////////////
void RBMMixedLayer::generateSamples()
{
    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->generateSamples();
}

////////////////////////
// computeExpectation //
////////////////////////
void RBMMixedLayer::computeExpectation()
{
    if( expectation_is_up_to_date )
        return;

    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->computeExpectation();

    expectation_is_up_to_date = true;
}

/////////////////////////
// computeExpectations //
/////////////////////////
void RBMMixedLayer::computeExpectations()
{
    if( expectations_are_up_to_date )
        return;

    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->computeExpectations();

    expectations_are_up_to_date = true;
}

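// Usage sketch for the activation / expectation / sampling machinery above
// (hypothetical caller code, assuming 'conn' is an already-built
// RBMConnection feeding this layer):
//
//     mixed->getAllActivations( conn, 0, false );  // fills 'activation'
//     mixed->computeExpectation();   // each sub-layer applies its own
//                                    // nonlinearity to its slice
//     mixed->generateSample();       // each sub-layer samples its own units
//
// All three calls fan out to the sub-layers, which read and write their
// aliased slices of 'activation', 'expectation' and 'sample'.
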
///////////
// fprop //
///////////
void RBMMixedLayer::fprop( const Vec& input, Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Vec sub_input = input.subVec(begin, size_i);
        Vec sub_output = output.subVec(begin, size_i);

        sub_layers[i]->fprop( sub_input, sub_output );
    }
}

///////////
// fprop //
///////////
void RBMMixedLayer::fprop( const Mat& inputs, Mat& outputs )
{
    int mbatch_size = inputs.length();
    PLASSERT( inputs.width() == size );
    outputs.resize( mbatch_size, size );

    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Mat sub_inputs = inputs.subMatColumns(begin, size_i);
        Mat sub_outputs = outputs.subMatColumns(begin, size_i);

        // GCC bug? This doesn't work:
        // sub_layers[i]->fprop( sub_inputs, sub_outputs );
        sub_layers[i]->OnlineLearningModule::fprop( sub_inputs, sub_outputs );
    }
}

void RBMMixedLayer::fprop( const Vec& input, const Vec& rbm_bias,
                           Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    PLASSERT( rbm_bias.size() == input_size );
    output.resize( output_size );

    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Vec sub_input = input.subVec(begin, size_i);
        Vec sub_rbm_bias = rbm_bias.subVec(begin, size_i);
        Vec sub_output = output.subVec(begin, size_i);

        sub_layers[i]->fprop( sub_input, sub_rbm_bias, sub_output );
    }
}

/////////////////
// bpropUpdate //
/////////////////
void RBMMixedLayer::bpropUpdate( const Vec& input, const Vec& output,
                                 Vec& input_gradient,
                                 const Vec& output_gradient,
                                 bool accumulate )
{
    PLASSERT( input.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
        // Note that, by construction of 'size', the whole gradient vector
        // should be cleared in the calls to sub_layers->bpropUpdate(..) below.
        input_gradient.resize( size );

    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Vec sub_input = input.subVec( begin, size_i );
        Vec sub_output = output.subVec( begin, size_i );
        Vec sub_input_gradient = input_gradient.subVec( begin, size_i );
        Vec sub_output_gradient = output_gradient.subVec( begin, size_i );

        sub_layers[i]->bpropUpdate( sub_input, sub_output,
                                    sub_input_gradient, sub_output_gradient,
                                    accumulate );
    }
}

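// Note on the gradient bookkeeping above and below: the slices handed to the
// sub-layers tile [0, size) exactly, by construction of 'init_positions' and
// 'size' in build_(), so each sub-layer clears and fills only its own part
// of the input gradient and together they cover it without overlap. With
// sub-layer sizes {2, 4}, for instance (illustrative values), sub_layers[0]
// owns indices [0, 2) and sub_layers[1] owns [2, 6).
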
void RBMMixedLayer::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                Mat& input_gradients,
                                const Mat& output_gradients,
                                bool accumulate)
{
    PLASSERT( inputs.width() == size );
    PLASSERT( outputs.width() == size );
    PLASSERT( output_gradients.width() == size );

    int batch_size = inputs.length();
    PLASSERT( outputs.length() == batch_size );
    PLASSERT( output_gradients.length() == batch_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == size &&
                      input_gradients.length() == batch_size,
                      "Cannot resize input_gradients and accumulate into it" );
    }
    else
        // Note that, by construction of 'size', the whole gradient matrix
        // should be cleared in the calls to sub_layers->bpropUpdate(..)
        // below.
        input_gradients.resize(batch_size, size);

    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Mat sub_inputs = inputs.subMatColumns( begin, size_i );
        Mat sub_outputs = outputs.subMatColumns( begin, size_i );
        Mat sub_input_gradients =
            input_gradients.subMatColumns( begin, size_i );
        Mat sub_output_gradients =
            output_gradients.subMatColumns( begin, size_i );

        sub_layers[i]->bpropUpdate( sub_inputs, sub_outputs,
                                    sub_input_gradients, sub_output_gradients,
                                    accumulate );
    }
}

void RBMMixedLayer::bpropUpdate(const Vec& input, const Vec& rbm_bias,
                                const Vec& output,
                                Vec& input_gradient, Vec& rbm_bias_gradient,
                                const Vec& output_gradient)
{
    PLASSERT( input.size() == size );
    PLASSERT( rbm_bias.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );

    input_gradient.resize( size );
    rbm_bias_gradient.resize( size );

    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Vec sub_input = input.subVec( begin, size_i );
        Vec sub_rbm_bias = rbm_bias.subVec( begin, size_i );
        Vec sub_output = output.subVec( begin, size_i );
        Vec sub_input_gradient = input_gradient.subVec( begin, size_i );
        Vec sub_rbm_bias_gradient = rbm_bias_gradient.subVec( begin, size_i );
        Vec sub_output_gradient = output_gradient.subVec( begin, size_i );

        sub_layers[i]->bpropUpdate( sub_input, sub_rbm_bias, sub_output,
                                    sub_input_gradient, sub_rbm_bias_gradient,
                                    sub_output_gradient );
    }
}

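// The conditional distribution of this layer factorizes across sub-layers,
// so the negative log-likelihood of a target decomposes as a sum:
//
//     NLL(target) = sum_i NLL_i( target_i )
//
// fpropNLL below computes and caches the per-sub-layer terms in 'nlls'
// (or 'mat_nlls' in the batch case) so that bpropNLL can reuse them.
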
real RBMMixedLayer::fpropNLL(const Vec& target)
{
    computeExpectation();

    PLASSERT( target.size() == input_size );
    nlls.resize(n_layers);

    real ret = 0;
    real nlli = 0;
    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        nlli = sub_layers[i]->fpropNLL( target.subVec(begin, size_i) );
        nlls[i] = nlli;
        ret += nlli;
    }
    return ret;
}

void RBMMixedLayer::fpropNLL(const Mat& targets, const Mat& costs_column)
{
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );

    costs_column.clear();
    mat_nlls.resize(batch_size, n_layers);
    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        sub_layers[i]->fpropNLL( targets.subMatColumns(begin, size_i),
                                 mat_nlls.column(i) );
        for( int j=0; j < batch_size; ++j )
            costs_column(j,0) += mat_nlls(j, i);
    }
}

void RBMMixedLayer::bpropNLL(const Vec& target, real nll, Vec& bias_gradient)
{
    computeExpectation();

    PLASSERT( target.size() == input_size );
    bias_gradient.resize( size );

    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;

        Vec sub_target = target.subVec(begin, size_i);
        Vec sub_bias_gradient = bias_gradient.subVec(begin, size_i);
        sub_layers[i]->bpropNLL( sub_target, nlls[i], sub_bias_gradient );
    }
}

void RBMMixedLayer::bpropNLL(const Mat& targets, const Mat& costs_column,
                             Mat& bias_gradients)
{
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );
    bias_gradients.resize( batch_size, size );

    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;

        Mat sub_targets = targets.subMatColumns(begin, size_i);
        Mat sub_bias_gradients = bias_gradients.subMatColumns(begin, size_i);
        // TODO: something other than storing mat_nlls...
        sub_layers[i]->bpropNLL( sub_targets, mat_nlls.column(i),
                                 sub_bias_gradients );
    }
}

void RBMMixedLayer::declareOptions(OptionList& ol)
{
    declareOption(ol, "sub_layers", &RBMMixedLayer::sub_layers,
                  OptionBase::buildoption,
                  "The concatenated RBMLayers composing this layer.");

    declareOption(ol, "init_positions", &RBMMixedLayer::init_positions,
                  OptionBase::learntoption,
                  "Initial index of each sub_layer in the concatenation.");

    declareOption(ol, "layer_of_unit", &RBMMixedLayer::layer_of_unit,
                  OptionBase::learntoption,
                  "layer_of_unit[i] is the index of the sub_layer containing"
                  " unit i.");

    declareOption(ol, "n_layers", &RBMMixedLayer::n_layers,
                  OptionBase::learntoption,
                  "Number of sub-layers.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    redeclareOption(ol, "bias", &RBMMixedLayer::bias,
                    OptionBase::nosave,
                    "bias is the concatenation of the sub_layers' biases.");

    redeclareOption(ol, "learning_rate", &RBMMixedLayer::learning_rate,
                    OptionBase::nosave,
                    "There is no global learning rate, only the sub_layers'.");

    redeclareOption(ol, "momentum", &RBMMixedLayer::momentum,
                    OptionBase::nosave,
                    "There is no global momentum, only the sub_layers'.");
}

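// Contrastive-divergence bookkeeping sketch (hypothetical caller code; the
// variable names are illustrative): positive- and negative-phase statistics
// are forwarded slice by slice below, so the usual RBMLayer training loop
// works unchanged on a mixed layer.
//
//     mixed->accumulatePosStats( data_sample );   // positive phase
//     mixed->accumulateNegStats( model_sample );  // negative phase
//     mixed->update();  // each sub-layer updates its bias, then clearStats()
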
void RBMMixedLayer::accumulatePosStats( const Vec& pos_values )
{
    for( int i=0 ; i<n_layers ; i++ )
    {
        Vec sub_pos_values = pos_values.subVec( init_positions[i],
                                                sub_layers[i]->size );
        sub_layers[i]->accumulatePosStats( sub_pos_values );
    }
    pos_count++;
}

void RBMMixedLayer::accumulateNegStats( const Vec& neg_values )
{
    for( int i=0 ; i<n_layers ; i++ )
    {
        Vec sub_neg_values = neg_values.subVec( init_positions[i],
                                                sub_layers[i]->size );
        sub_layers[i]->accumulateNegStats( sub_neg_values );
    }
    neg_count++;
}

void RBMMixedLayer::update()
{
    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->update();

    clearStats();
}

void RBMMixedLayer::update( const Vec& pos_values, const Vec& neg_values )
{
    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Vec sub_pos_values = pos_values.subVec( begin, size_i );
        Vec sub_neg_values = neg_values.subVec( begin, size_i );

        sub_layers[i]->update( sub_pos_values, sub_neg_values );
    }
}

void RBMMixedLayer::update( const Mat& pos_values, const Mat& neg_values )
{
    for( int i=0 ; i<n_layers ; i++ )
    {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Mat sub_pos_values = pos_values.subMatColumns( begin, size_i );
        Mat sub_neg_values = neg_values.subMatColumns( begin, size_i );

        sub_layers[i]->update( sub_pos_values, sub_neg_values );
    }
}

void RBMMixedLayer::reset()
{
    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->reset();

    expectation_is_up_to_date = false;
}

void RBMMixedLayer::clearStats()
{
    for( int i=0 ; i<n_layers ; i++ )
        sub_layers[i]->clearStats();

    pos_count = 0;
    neg_count = 0;
}

void RBMMixedLayer::forget()
{
    inherited::forget();
    if( !random_gen )
    {
        PLWARNING("RBMMixedLayer: cannot forget() without random_gen");
        return;
    }
    for( int i=0; i<n_layers; i++ )
    {
        if( !(sub_layers[i]->random_gen) )
            sub_layers[i]->random_gen = random_gen;
        sub_layers[i]->forget();
    }
}

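// Storage layout set up by build_() below: a first pass computes
// 'init_positions' and the total 'size'; a second pass makes each sub-layer's
// activation / sample / expectation / bias storage an alias of the matching
// column slice of this layer's buffers. For example, with two sub-layers of
// sizes 3 and 5 (illustrative values):
//
//     init_positions = [ 0, 3 ],  size = 8,
//     layer_of_unit  = [ 0, 0, 0, 1, 1, 1, 1, 1 ]
//
// so a write through a sub-layer is immediately visible in the concatenated
// vectors, and vice versa.
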
void RBMMixedLayer::build_()
{
    size = 0;
    n_layers = sub_layers.size();
    init_positions.resize(n_layers);

    // Fill init_positions
    for( int i = 0; i < n_layers; i++ )
    {
        init_positions[i] = size;
        size += sub_layers[i]->size;
    }

    // Resize
    layer_of_unit.resize( size );

    activation.resize( size );
    activations.resize( batch_size, size );

    sample.resize( size );
    samples.resize( batch_size, size );

    expectation.resize( size );
    expectations.resize( batch_size, size );

    bias.resize( size );

    // Second loop, to initialize activation, expectation, etc.
    for( int i = 0; i < n_layers; i++ )
    {
        int init_pos = init_positions[i];
        PP<RBMLayer> layer = sub_layers[i];
        int layer_size = layer->size;

        layer_of_unit.subVec(init_pos, layer_size).fill(i);
        layer->batch_size = batch_size;

        layer->activation = activation.subVec(init_pos, layer_size);
        layer->activations = activations.subMatColumns(init_pos, layer_size);

        layer->sample = sample.subVec(init_pos, layer_size);
        layer->samples = samples.subMatColumns(init_pos, layer_size);

        layer->setExpectationByRef( expectation.subVec(init_pos, layer_size) );
        layer->setExpectationsByRef(expectations.subMatColumns(init_pos,
                                                               layer_size));

        bias.subVec(init_pos, layer_size) << layer->bias;
        layer->bias = bias.subVec(init_pos, layer_size);

        // We changed fields of layer, so we need to rebuild it (especially
        // if it is another RBMMixedLayer)
        layer->build();

        if( learning_rate >= 0. )
            layer->setLearningRate( learning_rate );

        if( momentum >= 0. )
            layer->setMomentum( momentum );

        // If we have a random_gen and sub_layers[i] does not, share it
        if( random_gen && !(sub_layers[i]->random_gen) )
        {
            layer->random_gen = random_gen;
            layer->forget();
        }
    }

    input_size = size;
    output_size = size;
}

void RBMMixedLayer::build()
{
    inherited::build();
    build_();
}

void RBMMixedLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(sub_layers, copies);
    deepCopyField(init_positions, copies);
    deepCopyField(layer_of_unit, copies);
    deepCopyField(nlls, copies);
    deepCopyField(mat_nlls, copies);
}

real RBMMixedLayer::energy(const Vec& unit_values) const
{
    real energy = 0;

    for ( int i = 0; i < n_layers; ++i ) {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Vec values = unit_values.subVec( begin, size_i );
        energy += sub_layers[i]->energy(values);
    }

    return energy;
}

real RBMMixedLayer::freeEnergyContribution(const Vec& unit_activations)
    const
{
    real freeEnergy = 0;

    Vec act;
    for ( int i = 0; i < n_layers; ++i ) {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        act = unit_activations.subVec( begin, size_i );
        freeEnergy += sub_layers[i]->freeEnergyContribution(act);
    }

    return freeEnergy;
}

void RBMMixedLayer::freeEnergyContributionGradient(
    const Vec& unit_activations,
    Vec& unit_activations_gradient,
    real output_gradient, bool accumulate) const
{
    Vec act;
    Vec gact;
    for ( int i = 0; i < n_layers; ++i ) {
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        act = unit_activations.subVec( begin, size_i );
        gact = unit_activations_gradient.subVec( begin, size_i );
        sub_layers[i]->freeEnergyContributionGradient(
            act, gact, output_gradient, accumulate);
    }
}

int RBMMixedLayer::getConfigurationCount()
{
    int count = 1;

    for ( int i = 0; i < n_layers; ++i ) {
        int cc_layer_i = sub_layers[i]->getConfigurationCount();
        // Avoiding overflow
        if ( INFINITE_CONFIGURATIONS/cc_layer_i <= count )
            return INFINITE_CONFIGURATIONS;
        count *= cc_layer_i;
    }

    return count;
}

void RBMMixedLayer::getConfiguration(int conf_index, Vec& output)
{
    PLASSERT( output.length() == size );
    PLASSERT( conf_index >= 0 && conf_index < getConfigurationCount() );

    int conf_i = conf_index;
    for ( int i = 0; i < n_layers; ++i ) {
        int conf_layer_i = sub_layers[i]->getConfigurationCount();
        int begin = init_positions[i];
        int size_i = sub_layers[i]->size;
        Vec output_i = output.subVec( begin, size_i );
        sub_layers[i]->getConfiguration(conf_i % conf_layer_i, output_i);
        conf_i /= conf_layer_i;
    }
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :