PLearn 0.1
|
00001 // -*- C++ -*- 00002 00003 // StackedFocusedAutoassociatorsNet.cc 00004 // 00005 // Copyright (C) 2007 Hugo Larochelle 00006 // 00007 // Redistribution and use in source and binary forms, with or without 00008 // modification, are permitted provided that the following conditions are met: 00009 // 00010 // 1. Redistributions of source code must retain the above copyright 00011 // notice, this list of conditions and the following disclaimer. 00012 // 00013 // 2. Redistributions in binary form must reproduce the above copyright 00014 // notice, this list of conditions and the following disclaimer in the 00015 // documentation and/or other materials provided with the distribution. 00016 // 00017 // 3. The name of the authors may not be used to endorse or promote 00018 // products derived from this software without specific prior written 00019 // permission. 00020 // 00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 00031 // 00032 // This file is part of the PLearn library. For more information on the PLearn 00033 // library, go to the PLearn Web site at www.plearn.org 00034 00035 // Authors: Pascal Lamblin 00036 00040 #define PL_LOG_MODULE_NAME "StackedFocusedAutoassociatorsNet" 00041 #include <plearn/io/pl_log.h> 00042 00043 #include "StackedFocusedAutoassociatorsNet.h" 00044 #include <plearn/vmat/VMat_computeNearestNeighbors.h> 00045 #include <plearn/vmat/GetInputVMatrix.h> 00046 #include <plearn_learners/online/RBMMixedLayer.h> 00047 #include <plearn_learners/online/RBMMixedConnection.h> 00048 00049 namespace PLearn { 00050 using namespace std; 00051 00052 PLEARN_IMPLEMENT_OBJECT( 00053 StackedFocusedAutoassociatorsNet, 00054 "Neural net, trained layer-wise in a greedy but focused fashion using autoassociators/RBMs and a supervised non-parametric gradient.", 00055 "It is highly inspired by the StackedFocusedAutoassociators class,\n" 00056 "and can use use the same RBMLayer and RBMConnection components.\n" 00057 ); 00058 00059 StackedFocusedAutoassociatorsNet::StackedFocusedAutoassociatorsNet() : 00060 cd_learning_rate( 0. ), 00061 cd_decrease_ct( 0. ), 00062 greedy_learning_rate( 0. ), 00063 greedy_decrease_ct( 0. ), 00064 supervised_greedy_learning_rate( 0. ), 00065 supervised_greedy_decrease_ct( 0. ), 00066 fine_tuning_learning_rate( 0. ), 00067 fine_tuning_decrease_ct( 0. ), 00068 k_neighbors( 1 ), 00069 n_classes( -1 ), 00070 dissimilar_example_cost_precision(2.77), // Value taken from original paper 00071 do_not_use_knn_classifier(false), 00072 output_weights_l1_penalty_factor(0), 00073 output_weights_l2_penalty_factor(0), 00074 n_layers( 0 ), 00075 train_set_representations_up_to_date(false), 00076 currently_trained_layer( 0 ) 00077 { 00078 // random_gen will be initialized in PLearner::build_() 00079 random_gen = new PRandom(); 00080 nstages = 0; 00081 } 00082 00083 void StackedFocusedAutoassociatorsNet::declareOptions(OptionList& ol) 00084 { 00085 declareOption(ol, "cd_learning_rate", 00086 &StackedFocusedAutoassociatorsNet::cd_learning_rate, 00087 OptionBase::buildoption, 00088 "The learning rate used during the RBM " 00089 "contrastive divergence training"); 00090 00091 declareOption(ol, "cd_decrease_ct", 00092 &StackedFocusedAutoassociatorsNet::cd_decrease_ct, 00093 OptionBase::buildoption, 00094 "The decrease constant of the learning rate used during " 00095 "the RBMs contrastive\n" 00096 "divergence training. When a hidden layer has finished " 00097 "its training,\n" 00098 "the learning rate is reset to it's initial value.\n"); 00099 00100 declareOption(ol, "greedy_learning_rate", 00101 &StackedFocusedAutoassociatorsNet::greedy_learning_rate, 00102 OptionBase::buildoption, 00103 "The learning rate used during the autoassociator " 00104 "gradient descent training"); 00105 00106 declareOption(ol, "greedy_decrease_ct", 00107 &StackedFocusedAutoassociatorsNet::greedy_decrease_ct, 00108 OptionBase::buildoption, 00109 "The decrease constant of the learning rate used during " 00110 "the autoassociator\n" 00111 "gradient descent training. When a hidden layer has finished " 00112 "its training,\n" 00113 "the learning rate is reset to it's initial value.\n"); 00114 00115 declareOption(ol, "supervised_greedy_learning_rate", 00116 &StackedFocusedAutoassociatorsNet::supervised_greedy_learning_rate, 00117 OptionBase::buildoption, 00118 "Supervised, non-parametric, greedy learning rate"); 00119 00120 declareOption(ol, "supervised_greedy_decrease_ct", 00121 &StackedFocusedAutoassociatorsNet::supervised_greedy_decrease_ct, 00122 OptionBase::buildoption, 00123 "Supervised, non-parametric, greedy decrease constant"); 00124 00125 declareOption(ol, "fine_tuning_learning_rate", 00126 &StackedFocusedAutoassociatorsNet::fine_tuning_learning_rate, 00127 OptionBase::buildoption, 00128 "The learning rate used during the fine tuning gradient descent"); 00129 00130 declareOption(ol, "fine_tuning_decrease_ct", 00131 &StackedFocusedAutoassociatorsNet::fine_tuning_decrease_ct, 00132 OptionBase::buildoption, 00133 "The decrease constant of the learning rate used during " 00134 "fine tuning\n" 00135 "gradient descent.\n"); 00136 00137 declareOption(ol, "training_schedule", 00138 &StackedFocusedAutoassociatorsNet::training_schedule, 00139 OptionBase::buildoption, 00140 "Number of examples to use during each phase of greedy pre-training.\n" 00141 "The number of fine-tunig steps is defined by nstages.\n" 00142 ); 00143 00144 declareOption(ol, "layers", &StackedFocusedAutoassociatorsNet::layers, 00145 OptionBase::buildoption, 00146 "The layers of units in the network. The first element\n" 00147 "of this vector should be the input layer and the\n" 00148 "subsequent elements should be the hidden layers. The\n" 00149 "output layer should not be included in layers.\n"); 00150 00151 declareOption(ol, "connections", &StackedFocusedAutoassociatorsNet::connections, 00152 OptionBase::buildoption, 00153 "The weights of the connections between the layers"); 00154 00155 declareOption(ol, "reconstruction_connections", 00156 &StackedFocusedAutoassociatorsNet::reconstruction_connections, 00157 OptionBase::buildoption, 00158 "The reconstruction weights of the autoassociators"); 00159 00160 declareOption(ol, "unsupervised_layers", 00161 &StackedFocusedAutoassociatorsNet::unsupervised_layers, 00162 OptionBase::buildoption, 00163 "Additional units for greedy unsupervised learning"); 00164 00165 declareOption(ol, "unsupervised_connections", 00166 &StackedFocusedAutoassociatorsNet::unsupervised_connections, 00167 OptionBase::buildoption, 00168 "Additional connections for greedy unsupervised learning"); 00169 00170 declareOption(ol, "k_neighbors", 00171 &StackedFocusedAutoassociatorsNet::k_neighbors, 00172 OptionBase::buildoption, 00173 "Number of good nearest neighbors to attract and bad nearest " 00174 "neighbors to repel."); 00175 00176 declareOption(ol, "n_classes", 00177 &StackedFocusedAutoassociatorsNet::n_classes, 00178 OptionBase::buildoption, 00179 "Number of classes."); 00180 00181 declareOption(ol, "dissimilar_example_cost_precision", 00182 &StackedFocusedAutoassociatorsNet::dissimilar_example_cost_precision, 00183 OptionBase::buildoption, 00184 "Parameter that constrols the importance of the dissimilar example cost."); 00185 00186 declareOption(ol, "do_not_use_knn_classifier", 00187 &StackedFocusedAutoassociatorsNet::do_not_use_knn_classifier, 00188 OptionBase::buildoption, 00189 "Use standard neural net architecture, not the nearest " 00190 "neighbor model."); 00191 00192 declareOption(ol, "greedy_stages", 00193 &StackedFocusedAutoassociatorsNet::greedy_stages, 00194 OptionBase::learntoption, 00195 "Number of training samples seen in the different greedy " 00196 "phases.\n" 00197 ); 00198 00199 declareOption(ol, "n_layers", &StackedFocusedAutoassociatorsNet::n_layers, 00200 OptionBase::learntoption, 00201 "Number of layers" 00202 ); 00203 00204 declareOption(ol, "final_module", 00205 &StackedFocusedAutoassociatorsNet::final_module, 00206 OptionBase::learntoption, 00207 "Output layer of neural net" 00208 ); 00209 00210 declareOption(ol, "final_cost", 00211 &StackedFocusedAutoassociatorsNet::final_cost, 00212 OptionBase::learntoption, 00213 "Cost on output layer of neural net" 00214 ); 00215 00216 // Now call the parent class' declareOptions 00217 inherited::declareOptions(ol); 00218 } 00219 00220 void StackedFocusedAutoassociatorsNet::build_() 00221 { 00222 // ### This method should do the real building of the object, 00223 // ### according to set 'options', in *any* situation. 00224 // ### Typical situations include: 00225 // ### - Initial building of an object from a few user-specified options 00226 // ### - Building of a "reloaded" object: i.e. from the complete set of 00227 // ### all serialised options. 00228 // ### - Updating or "re-building" of an object after a few "tuning" 00229 // ### options have been modified. 00230 // ### You should assume that the parent class' build_() has already been 00231 // ### called. 00232 00233 MODULE_LOG << "build_() called" << endl; 00234 00235 if(inputsize_ > 0 && targetsize_ > 0) 00236 { 00237 // Initialize some learnt variables 00238 n_layers = layers.length(); 00239 00240 train_set_representations_up_to_date = false; 00241 00242 if( n_classes <= 0 ) 00243 PLERROR("StackedFocusedAutoassociatorsNet::build_() - \n" 00244 "n_classes should be > 0.\n"); 00245 test_votes.resize(n_classes); 00246 00247 if( k_neighbors <= 0 ) 00248 PLERROR("StackedFocusedAutoassociatorsNet::build_() - \n" 00249 "k_neighbors should be > 0.\n"); 00250 test_nearest_neighbors_indices.resize(k_neighbors); 00251 00252 if( weightsize_ > 0 ) 00253 PLERROR("StackedFocusedAutoassociatorsNet::build_() - \n" 00254 "usage of weighted samples (weight size > 0) is not\n" 00255 "implemented yet.\n"); 00256 00257 if( training_schedule.length() != n_layers-1 ) 00258 PLERROR("StackedFocusedAutoassociatorsNet::build_() - \n" 00259 "training_schedule should have %d elements.\n", 00260 n_layers-1); 00261 00262 if(greedy_stages.length() == 0) 00263 { 00264 greedy_stages.resize(n_layers-1); 00265 greedy_stages.clear(); 00266 } 00267 00268 if(stage > 0) 00269 currently_trained_layer = n_layers; 00270 else 00271 { 00272 currently_trained_layer = n_layers-1; 00273 while(currently_trained_layer>1 00274 && greedy_stages[currently_trained_layer-1] <= 0) 00275 currently_trained_layer--; 00276 } 00277 00278 build_layers_and_connections(); 00279 00280 if( do_not_use_knn_classifier & (!final_module || !final_cost) ) 00281 build_output_layer_and_cost(); 00282 } 00283 } 00284 00285 void StackedFocusedAutoassociatorsNet::build_output_layer_and_cost() 00286 { 00287 GradNNetLayerModule* gnl = new GradNNetLayerModule(); 00288 gnl->input_size = layers[n_layers-1]->size; 00289 gnl->output_size = n_classes; 00290 gnl->L1_penalty_factor = output_weights_l1_penalty_factor; 00291 gnl->L2_penalty_factor = output_weights_l2_penalty_factor; 00292 gnl->random_gen = random_gen; 00293 gnl->build(); 00294 00295 SoftmaxModule* sm = new SoftmaxModule(); 00296 sm->input_size = n_classes; 00297 sm->random_gen = random_gen; 00298 sm->build(); 00299 00300 ModuleStackModule* msm = new ModuleStackModule(); 00301 msm->modules.resize(2); 00302 msm->modules[0] = gnl; 00303 msm->modules[1] = sm; 00304 msm->random_gen = random_gen; 00305 msm->build(); 00306 final_module = msm; 00307 00308 final_module->forget(); 00309 00310 NLLCostModule* nll = new NLLCostModule(); 00311 nll->input_size = n_classes; 00312 nll->random_gen = random_gen; 00313 nll->build(); 00314 00315 ClassErrorCostModule* class_error = new ClassErrorCostModule(); 00316 class_error->input_size = n_classes; 00317 class_error->random_gen = random_gen; 00318 class_error->build(); 00319 00320 CombiningCostsModule* comb_costs = new CombiningCostsModule(); 00321 comb_costs->cost_weights.resize(2); 00322 comb_costs->cost_weights[0] = 1; 00323 comb_costs->cost_weights[1] = 0; 00324 comb_costs->sub_costs.resize(2); 00325 comb_costs->sub_costs[0] = nll; 00326 comb_costs->sub_costs[1] = class_error; 00327 comb_costs->build(); 00328 00329 final_cost = comb_costs; 00330 final_cost->forget(); 00331 } 00332 00333 void StackedFocusedAutoassociatorsNet::build_layers_and_connections() 00334 { 00335 MODULE_LOG << "build_layers_and_connections() called" << endl; 00336 00337 if( connections.length() != n_layers-1 ) 00338 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() - \n" 00339 "there should be %d connections.\n", 00340 n_layers-1); 00341 00342 if( !fast_exact_is_equal( greedy_learning_rate, 0 ) 00343 && reconstruction_connections.length() != n_layers-1 ) 00344 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() - \n" 00345 "there should be %d reconstruction connections.\n", 00346 n_layers-1); 00347 00348 if( !( reconstruction_connections.length() == 0 00349 || reconstruction_connections.length() == n_layers-1 ) ) 00350 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() - \n" 00351 "there should be either 0 or %d reconstruction connections.\n", 00352 n_layers-1); 00353 00354 00355 if(unsupervised_layers.length() != n_layers-1 00356 && unsupervised_layers.length() != 0) 00357 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() - \n" 00358 "there should be either 0 of %d unsupervised_layers.\n", 00359 n_layers-1); 00360 00361 if(unsupervised_connections.length() != n_layers-1 00362 && unsupervised_connections.length() != 0) 00363 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() - \n" 00364 "there should be either 0 of %d unsupervised_connections.\n", 00365 n_layers-1); 00366 00367 if(unsupervised_connections.length() != unsupervised_layers.length()) 00368 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() - \n" 00369 "there should be as many unsupervised_connections and " 00370 "unsupervised_layers.\n"); 00371 00372 00373 if(layers[0]->size != inputsize_) 00374 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() - \n" 00375 "layers[0] should have a size of %d.\n", 00376 inputsize_); 00377 00378 00379 activations.resize( n_layers ); 00380 expectations.resize( n_layers ); 00381 activation_gradients.resize( n_layers ); 00382 expectation_gradients.resize( n_layers ); 00383 00384 greedy_layers.resize(n_layers-1); 00385 greedy_connections.resize(n_layers-1); 00386 for( int i=0 ; i<n_layers-1 ; i++ ) 00387 { 00388 if( layers[i]->size != connections[i]->down_size ) 00389 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() " 00390 "- \n" 00391 "connections[%i] should have a down_size of %d.\n", 00392 i, layers[i]->size); 00393 00394 if( connections[i]->up_size != layers[i+1]->size ) 00395 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() " 00396 "- \n" 00397 "connections[%i] should have a up_size of %d.\n", 00398 i, layers[i+1]->size); 00399 00400 if(unsupervised_layers.length() != 0 && 00401 unsupervised_connections.length() != 0 && 00402 unsupervised_layers[i] && unsupervised_connections[i]) 00403 { 00404 if( layers[i]->size != 00405 unsupervised_connections[i]->down_size ) 00406 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() " 00407 "- \n" 00408 "connections[%i] should have a down_size of %d.\n", 00409 i, unsupervised_layers[i]->size); 00410 00411 if( unsupervised_connections[i]->up_size != 00412 unsupervised_layers[i]->size ) 00413 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() " 00414 "- \n" 00415 "connections[%i] should have a up_size of %d.\n", 00416 i, unsupervised_layers[i+1]->size); 00417 00418 if( reconstruction_connections.length() != 0 ) 00419 { 00420 if( layers[i+1]->size + unsupervised_layers[i]->size != 00421 reconstruction_connections[i]->down_size ) 00422 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() " 00423 "- \n" 00424 "recontruction_connections[%i] should have a down_size of " 00425 "%d.\n", 00426 i, layers[i+1]->size + unsupervised_layers[i]->size); 00427 00428 if( reconstruction_connections[i]->up_size != 00429 layers[i]->size ) 00430 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() " 00431 "- \n" 00432 "recontruction_connections[%i] should have a up_size of " 00433 "%d.\n", 00434 i, layers[i]->size); 00435 } 00436 00437 if( !(unsupervised_layers[i]->random_gen) ) 00438 { 00439 unsupervised_layers[i]->random_gen = random_gen; 00440 unsupervised_layers[i]->forget(); 00441 } 00442 00443 if( !(unsupervised_connections[i]->random_gen) ) 00444 { 00445 unsupervised_connections[i]->random_gen = random_gen; 00446 unsupervised_connections[i]->forget(); 00447 } 00448 00449 PP<RBMMixedLayer> greedy_layer = new RBMMixedLayer(); 00450 greedy_layer->sub_layers.resize(2); 00451 greedy_layer->sub_layers[0] = layers[i+1]; 00452 greedy_layer->sub_layers[1] = unsupervised_layers[i]; 00453 greedy_layer->size = layers[i+1]->size + unsupervised_layers[i]->size; 00454 greedy_layer->build(); 00455 00456 PP<RBMMixedConnection> greedy_connection = new RBMMixedConnection(); 00457 greedy_connection->sub_connections.resize(2,1); 00458 greedy_connection->sub_connections(0,0) = connections[i]; 00459 greedy_connection->sub_connections(1,0) = unsupervised_connections[i]; 00460 greedy_connection->build(); 00461 00462 greedy_layers[i] = greedy_layer; 00463 greedy_connections[i] = greedy_connection; 00464 } 00465 else 00466 { 00467 if( reconstruction_connections.length() != 0 ) 00468 { 00469 if( layers[i+1]->size != reconstruction_connections[i]->down_size ) 00470 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() " 00471 "- \n" 00472 "recontruction_connections[%i] should have a down_size of " 00473 "%d.\n", 00474 i, layers[i+1]->size); 00475 00476 if( reconstruction_connections[i]->up_size != layers[i]->size ) 00477 PLERROR("StackedFocusedAutoassociatorsNet::build_layers_and_connections() " 00478 "- \n" 00479 "recontruction_connections[%i] should have a up_size of " 00480 "%d.\n", 00481 i, layers[i]->size); 00482 } 00483 greedy_layers[i] = layers[i+1]; 00484 greedy_connections[i] = connections[i]; 00485 } 00486 00487 if( !(layers[i]->random_gen) ) 00488 { 00489 layers[i]->random_gen = random_gen; 00490 layers[i]->forget(); 00491 } 00492 00493 if( !(connections[i]->random_gen) ) 00494 { 00495 connections[i]->random_gen = random_gen; 00496 connections[i]->forget(); 00497 } 00498 00499 if( reconstruction_connections.length() != 0 00500 && !(reconstruction_connections[i]->random_gen) ) 00501 { 00502 reconstruction_connections[i]->random_gen = random_gen; 00503 reconstruction_connections[i]->forget(); 00504 } 00505 00506 activations[i].resize( layers[i]->size ); 00507 expectations[i].resize( layers[i]->size ); 00508 activation_gradients[i].resize( layers[i]->size ); 00509 expectation_gradients[i].resize( layers[i]->size ); 00510 } 00511 00512 if( !(layers[n_layers-1]->random_gen) ) 00513 { 00514 layers[n_layers-1]->random_gen = random_gen; 00515 layers[n_layers-1]->forget(); 00516 } 00517 activations[n_layers-1].resize( layers[n_layers-1]->size ); 00518 expectations[n_layers-1].resize( layers[n_layers-1]->size ); 00519 activation_gradients[n_layers-1].resize( layers[n_layers-1]->size ); 00520 expectation_gradients[n_layers-1].resize( layers[n_layers-1]->size ); 00521 } 00522 00523 // ### Nothing to add here, simply calls build_ 00524 void StackedFocusedAutoassociatorsNet::build() 00525 { 00526 inherited::build(); 00527 build_(); 00528 } 00529 00530 00531 void StackedFocusedAutoassociatorsNet::makeDeepCopyFromShallowCopy(CopiesMap& copies) 00532 { 00533 inherited::makeDeepCopyFromShallowCopy(copies); 00534 00535 // deepCopyField(, copies); 00536 00537 // Public options 00538 deepCopyField(training_schedule, copies); 00539 deepCopyField(layers, copies); 00540 deepCopyField(connections, copies); 00541 deepCopyField(reconstruction_connections, copies); 00542 deepCopyField(unsupervised_layers, copies); 00543 deepCopyField(unsupervised_connections, copies); 00544 00545 // Protected options 00546 deepCopyField(activations, copies); 00547 deepCopyField(expectations, copies); 00548 deepCopyField(activation_gradients, copies); 00549 deepCopyField(expectation_gradients, copies); 00550 deepCopyField(greedy_activation, copies); 00551 deepCopyField(greedy_expectation, copies); 00552 deepCopyField(greedy_activation_gradient, copies); 00553 deepCopyField(greedy_expectation_gradient, copies); 00554 deepCopyField(reconstruction_activations, copies); 00555 deepCopyField(reconstruction_activation_gradients, copies); 00556 deepCopyField(reconstruction_expectation_gradients, copies); 00557 deepCopyField(greedy_layers, copies); 00558 deepCopyField(greedy_connections, copies); 00559 deepCopyField(similar_example_representation, copies); 00560 deepCopyField(dissimilar_example_representation, copies); 00561 deepCopyField(input_representation, copies); 00562 deepCopyField(previous_input_representation, copies); 00563 deepCopyField(dissimilar_gradient_contribution, copies); 00564 deepCopyField(pos_down_val, copies); 00565 deepCopyField(pos_up_val, copies); 00566 deepCopyField(neg_down_val, copies); 00567 deepCopyField(neg_up_val, copies); 00568 deepCopyField(final_cost_input, copies); 00569 deepCopyField(final_cost_value, copies); 00570 deepCopyField(final_cost_gradient, copies); 00571 deepCopyField(class_datasets, copies); 00572 deepCopyField(other_classes_proportions, copies); 00573 deepCopyField(nearest_neighbors_indices, copies); 00574 deepCopyField(test_nearest_neighbors_indices, copies); 00575 deepCopyField(test_votes, copies); 00576 deepCopyField(train_set_representations, copies); 00577 deepCopyField(train_set_representations_vmat, copies); 00578 deepCopyField(train_set_targets, copies); 00579 deepCopyField(greedy_stages, copies); 00580 deepCopyField(final_module, copies); 00581 deepCopyField(final_cost, copies); 00582 } 00583 00584 00585 int StackedFocusedAutoassociatorsNet::outputsize() const 00586 { 00587 //if(currently_trained_layer < n_layers) 00588 // return layers[currently_trained_layer]->size; 00589 //return layers[n_layers-1]->size; 00590 return n_classes; 00591 } 00592 00593 void StackedFocusedAutoassociatorsNet::forget() 00594 { 00598 00605 inherited::forget(); 00606 00607 train_set_representations_up_to_date = false; 00608 00609 for( int i=0 ; i<n_layers ; i++ ) 00610 layers[i]->forget(); 00611 00612 for( int i=0 ; i<n_layers-1 ; i++ ) 00613 connections[i]->forget(); 00614 00615 if(unsupervised_layers.length() != 0) 00616 for( int i=0 ; i<n_layers-1 ; i++ ) 00617 unsupervised_layers[i]->forget(); 00618 00619 if(unsupervised_connections.length() != 0) 00620 for( int i=0 ; i<n_layers-1 ; i++ ) 00621 unsupervised_connections[i]->forget(); 00622 00623 for( int i=0; i<reconstruction_connections.length(); i++) 00624 reconstruction_connections[i]->forget(); 00625 00626 if( do_not_use_knn_classifier ) 00627 build_output_layer_and_cost(); 00628 00629 stage = 0; 00630 greedy_stages.clear(); 00631 } 00632 00633 void StackedFocusedAutoassociatorsNet::train() 00634 { 00635 MODULE_LOG << "train() called " << endl; 00636 MODULE_LOG << " training_schedule = " << training_schedule << endl; 00637 00638 Vec input( inputsize() ); 00639 Vec similar_example( inputsize() ); 00640 Vec dissimilar_example( inputsize() ); 00641 Vec target( targetsize() ); 00642 Vec target2( targetsize() ); 00643 real weight; // unused 00644 real weight2; // unused 00645 00646 Vec similar_example_index(1); 00647 00648 TVec<string> train_cost_names = getTrainCostNames() ; 00649 Vec train_costs( train_cost_names.length() ); 00650 train_costs.fill(MISSING_VALUE) ; 00651 00652 int nsamples = train_set->length(); 00653 int sample; 00654 00655 PP<ProgressBar> pb; 00656 00657 // clear stats of previous epoch 00658 train_stats->forget(); 00659 00660 int init_stage; 00661 00662 /***** initial greedy training *****/ 00663 for( int i=0 ; i<n_layers-1 ; i++ ) 00664 { 00665 MODULE_LOG << "Training connection weights between layers " << i 00666 << " and " << i+1 << endl; 00667 00668 int end_stage = training_schedule[i]; 00669 int* this_stage = greedy_stages.subVec(i,1).data(); 00670 init_stage = *this_stage; 00671 00672 MODULE_LOG << " stage = " << *this_stage << endl; 00673 MODULE_LOG << " end_stage = " << end_stage << endl; 00674 MODULE_LOG << " greedy_learning_rate = " << greedy_learning_rate << endl; 00675 00676 if( report_progress && *this_stage < end_stage ) 00677 pb = new ProgressBar( "Training layer "+tostring(i) 00678 +" of "+classname(), 00679 end_stage - init_stage ); 00680 00681 train_costs.fill(MISSING_VALUE); 00682 reconstruction_activations.resize(layers[i]->size); 00683 reconstruction_activation_gradients.resize(layers[i]->size); 00684 reconstruction_expectation_gradients.resize(layers[i]->size); 00685 00686 if( !fast_exact_is_equal( supervised_greedy_learning_rate, 0 ) ) 00687 { 00688 similar_example_representation.resize(layers[i+1]->size); 00689 dissimilar_example_representation.resize(layers[i+1]->size); 00690 dissimilar_gradient_contribution.resize(layers[i+1]->size); 00691 } 00692 00693 input_representation.resize(layers[i+1]->size); 00694 greedy_activation.resize(greedy_layers[i]->size); 00695 greedy_expectation.resize(greedy_layers[i]->size); 00696 greedy_activation_gradient.resize(greedy_layers[i]->size); 00697 greedy_expectation_gradient.resize(greedy_layers[i]->size); 00698 00699 pos_down_val.resize(layers[i]->size); 00700 pos_up_val.resize(greedy_layers[i]->size); 00701 neg_down_val.resize(layers[i]->size); 00702 neg_up_val.resize(greedy_layers[i]->size); 00703 00704 for( ; *this_stage<end_stage ; (*this_stage)++ ) 00705 { 00706 00707 sample = *this_stage % nsamples; 00708 train_set->getExample(sample, input, target, weight); 00709 if( !fast_exact_is_equal( supervised_greedy_learning_rate, 0 ) ) 00710 { 00711 // Find similar example 00712 00713 int sim_index = random_gen->uniform_multinomial_sample(k_neighbors); 00714 class_datasets[(int)round(target[0])]->getExample( 00715 nearest_neighbors_indices(sample,sim_index), 00716 similar_example, target2, weight2); 00717 00718 if(round(target[0]) != round(target2[0])) 00719 PLERROR("StackedFocusedAutoassociatorsNet::train(): similar" 00720 " example is not from same class!"); 00721 00722 // Find dissimilar example 00723 00724 int dissim_class_index = random_gen->multinomial_sample( 00725 other_classes_proportions((int)round(target[0]))); 00726 00727 int dissim_index = random_gen->uniform_multinomial_sample( 00728 class_datasets[dissim_class_index]->length()); 00729 00730 class_datasets[dissim_class_index]->getExample(dissim_index, 00731 dissimilar_example, target2, weight2); 00732 00733 if(((int)round(target[0])) == ((int)round(target2[0]))) 00734 PLERROR("StackedFocusedAutoassociatorsNet::train(): dissimilar" 00735 " example is from same class!"); 00736 } 00737 greedyStep( input, target, i, train_costs, *this_stage, 00738 similar_example, dissimilar_example); 00739 train_stats->update( train_costs ); 00740 00741 if( pb ) 00742 pb->update( *this_stage - init_stage + 1 ); 00743 } 00744 } 00745 00746 /***** fine-tuning by gradient descent *****/ 00747 if( stage < nstages ) 00748 { 00749 00750 MODULE_LOG << "Fine-tuning all parameters, by gradient descent" << endl; 00751 MODULE_LOG << " stage = " << stage << endl; 00752 MODULE_LOG << " nstages = " << nstages << endl; 00753 MODULE_LOG << " fine_tuning_learning_rate = " << 00754 fine_tuning_learning_rate << endl; 00755 00756 init_stage = stage; 00757 if( report_progress && stage < nstages ) 00758 pb = new ProgressBar( "Fine-tuning parameters of all layers of " 00759 + classname(), 00760 nstages - init_stage ); 00761 00762 setLearningRate( fine_tuning_learning_rate ); 00763 train_costs.fill(MISSING_VALUE); 00764 00765 if( !do_not_use_knn_classifier ) 00766 { 00767 similar_example_representation.resize( 00768 layers[n_layers-1]->size); 00769 dissimilar_example_representation.resize( 00770 layers[n_layers-1]->size); 00771 dissimilar_gradient_contribution.resize( 00772 layers[n_layers-1]->size); 00773 similar_example.resize(inputsize()); 00774 dissimilar_example.resize(inputsize()); 00775 } 00776 00777 final_cost_input.resize(n_classes); 00778 final_cost_value.resize(2); // Should be resized anyways 00779 final_cost_gradient.resize(n_classes); 00780 00781 for( ; stage<nstages ; stage++ ) 00782 { 00783 sample = stage % nsamples; 00784 if( !fast_exact_is_equal( fine_tuning_decrease_ct, 0. ) ) 00785 setLearningRate( fine_tuning_learning_rate 00786 / (1. + fine_tuning_decrease_ct * stage ) ); 00787 00788 train_set->getExample( sample, input, target, weight ); 00789 00790 if( !do_not_use_knn_classifier ) 00791 { 00792 // Find similar example 00793 00794 int sim_index = random_gen->uniform_multinomial_sample(k_neighbors); 00795 class_datasets[(int)round(target[0])]->getExample( 00796 nearest_neighbors_indices(sample,sim_index), 00797 similar_example, target2, weight2); 00798 00799 if(((int)round(target[0])) != ((int)round(target2[0]))) 00800 PLERROR("StackedFocusedAutoassociatorsNet::train(): similar" 00801 " example is not from same class!"); 00802 00803 // Find dissimilar example 00804 00805 int dissim_class_index = random_gen->multinomial_sample( 00806 other_classes_proportions((int)round(target[0]))); 00807 00808 int dissim_index = random_gen->uniform_multinomial_sample( 00809 class_datasets[dissim_class_index]->length()); 00810 00811 class_datasets[dissim_class_index]->getExample(dissim_index, 00812 dissimilar_example, target2, weight2); 00813 00814 if(((int)round(target[0])) == ((int)round(target2[0]))) 00815 PLERROR("StackedFocusedAutoassociatorsNet::train(): dissimilar" 00816 " example is from same class!"); 00817 } 00818 00819 fineTuningStep( input, target, train_costs, 00820 similar_example, dissimilar_example); 00821 train_stats->update( train_costs ); 00822 00823 if( pb ) 00824 pb->update( stage - init_stage + 1 ); 00825 } 00826 00827 if(verbosity>2) 00828 { 00829 Vec train_stats_vec = train_stats->getMean(); 00830 cout << "similarity_cost = " << train_stats_vec[train_stats_vec.length()-3] << endl; 00831 cout << "dissimilarity_cost = " << train_stats_vec[train_stats_vec.length()-2] << endl; 00832 cout << "metric_cost = " << train_stats_vec[train_stats_vec.length()-1] << endl; 00833 } 00834 } 00835 00836 train_stats->finalize(); 00837 MODULE_LOG << " train costs = " << train_stats->getMean() << endl; 00838 00839 00840 // Update currently_trained_layer 00841 if(stage > 0) 00842 currently_trained_layer = n_layers; 00843 else 00844 { 00845 currently_trained_layer = n_layers-1; 00846 while(currently_trained_layer>1 00847 && greedy_stages[currently_trained_layer-1] <= 0) 00848 currently_trained_layer--; 00849 } 00850 } 00851 00852 void StackedFocusedAutoassociatorsNet::greedyStep( 00853 const Vec& input, const Vec& target, int index, 00854 Vec train_costs, int this_stage, Vec similar_example, Vec dissimilar_example ) 00855 { 00856 PLASSERT( index < n_layers ); 00857 real lr; 00858 train_set_representations_up_to_date = false; 00859 00860 if( !fast_exact_is_equal( supervised_greedy_learning_rate, 0 ) ) 00861 { 00862 // Get similar example representation 00863 00864 computeRepresentation(similar_example, similar_example_representation, 00865 index+1); 00866 00867 // Get dissimilar example representation 00868 00869 computeRepresentation(dissimilar_example, dissimilar_example_representation, 00870 index+1); 00871 } 00872 00873 // Get example representation 00874 00875 computeRepresentation(input, previous_input_representation, 00876 index); 00877 greedy_connections[index]->fprop(previous_input_representation, 00878 greedy_activation); 00879 greedy_layers[index]->fprop(greedy_activation, 00880 greedy_expectation); 00881 input_representation << greedy_expectation.subVec(0,layers[index+1]->size); 00882 00883 // Autoassociator learning 00884 00885 if( !fast_exact_is_equal( greedy_learning_rate, 0 ) ) 00886 { 00887 if( !fast_exact_is_equal( greedy_decrease_ct , 0 ) ) 00888 lr = greedy_learning_rate/(1 + greedy_decrease_ct 00889 * this_stage); 00890 else 00891 lr = greedy_learning_rate; 00892 00893 layers[index]->setLearningRate( lr ); 00894 greedy_connections[index]->setLearningRate( lr ); 00895 reconstruction_connections[index]->setLearningRate( lr ); 00896 greedy_layers[index]->setLearningRate( lr ); 00897 00898 reconstruction_connections[ index ]->fprop( greedy_expectation, 00899 reconstruction_activations); 00900 layers[ index ]->fprop( reconstruction_activations, 00901 layers[ index ]->expectation); 00902 00903 layers[ index ]->activation << reconstruction_activations; 00904 layers[ index ]->setExpectationByRef(layers[ index ]->expectation); 00905 real rec_err = layers[ index ]->fpropNLL(previous_input_representation); 00906 train_costs[index] = rec_err; 00907 00908 layers[ index ]->bpropNLL(previous_input_representation, rec_err, 00909 reconstruction_activation_gradients); 00910 } 00911 00912 if( !fast_exact_is_equal( supervised_greedy_learning_rate, 0 ) ) 00913 { 00914 // Compute supervised gradient 00915 00916 // Similar example contribution 00917 substract(input_representation,similar_example_representation, 00918 expectation_gradients[index+1]); 00919 expectation_gradients[index+1] *= 4/sqrt((real)layers[index+1]->size); 00920 00921 // Dissimilar example contribution 00922 real dist = sqrt(powdistance(input_representation, 00923 dissimilar_example_representation, 00924 2)); 00925 00926 //if( dist == 0 ) 00927 // PLWARNING("StackedFocusedAutoassociatorsNet::fineTuningStep(): dissimilar" 00928 // " example representation is exactly the sample as the" 00929 // " input example. Gradient would be infinite! Skipping this" 00930 // " example..."); 00931 //else 00932 //{ 00933 substract(input_representation,dissimilar_example_representation, 00934 dissimilar_gradient_contribution); 00935 00936 dissimilar_gradient_contribution *= -2* dissimilar_example_cost_precision* 00937 safeexp(-dissimilar_example_cost_precision*dist/sqrt((real)layers[index+1]->size)); 00938 00939 expectation_gradients[index+1] += dissimilar_gradient_contribution; 00940 //} 00941 } 00942 00943 // RBM learning 00944 if( !fast_exact_is_equal( cd_learning_rate, 0 ) ) 00945 { 00946 greedy_layers[index]->setExpectation( greedy_expectation ); 00947 greedy_layers[index]->generateSample(); 00948 00949 // accumulate positive stats using the expectation 00950 // we deep-copy because the value will change during negative phase 00951 pos_down_val = expectations[index]; 00952 pos_up_val << greedy_layers[index]->expectation; 00953 00954 // down propagation, starting from a sample of layers[index+1] 00955 greedy_connections[index]->setAsUpInput( greedy_layers[index]->sample ); 00956 00957 layers[index]->getAllActivations( greedy_connections[index] ); 00958 layers[index]->computeExpectation(); 00959 layers[index]->generateSample(); 00960 00961 // negative phase 00962 greedy_connections[index]->setAsDownInput( layers[index]->sample ); 00963 greedy_layers[index]->getAllActivations( greedy_connections[index] ); 00964 greedy_layers[index]->computeExpectation(); 00965 // accumulate negative stats 00966 // no need to deep-copy because the values won't change before update 00967 neg_down_val = layers[index]->sample; 00968 neg_up_val = greedy_layers[index]->expectation; 00969 } 00970 00971 // Update hidden layer bias and weights 00972 00973 if( !fast_exact_is_equal( greedy_learning_rate, 0 ) ) 00974 { 00975 layers[ index ]->update(reconstruction_activation_gradients); 00976 00977 reconstruction_connections[ index ]->bpropUpdate( 00978 greedy_expectation, 00979 reconstruction_activations, 00980 reconstruction_expectation_gradients, 00981 reconstruction_activation_gradients); 00982 00983 greedy_layers[ index ]->bpropUpdate( 00984 greedy_activation, 00985 greedy_expectation, 00986 // reused 00987 reconstruction_activation_gradients, 00988 reconstruction_expectation_gradients); 00989 00990 greedy_connections[ index ]->bpropUpdate( 00991 previous_input_representation, 00992 greedy_activation, 00993 reconstruction_expectation_gradients, //reused 00994 reconstruction_activation_gradients); 00995 } 00996 00997 00998 if( !fast_exact_is_equal( supervised_greedy_learning_rate, 0 ) ) 00999 { 01000 if( !fast_exact_is_equal( supervised_greedy_decrease_ct , 0 ) ) 01001 lr = supervised_greedy_learning_rate/(1 + supervised_greedy_decrease_ct 01002 * this_stage); 01003 else 01004 lr = supervised_greedy_learning_rate; 01005 01006 layers[index]->setLearningRate( lr ); 01007 connections[index]->setLearningRate( lr ); 01008 layers[index+1]->setLearningRate( lr ); 01009 01010 layers[ index+1 ]->bpropUpdate( 01011 greedy_activation.subVec(0,layers[index+1]->size), 01012 greedy_expectation.subVec(0,layers[index+1]->size), 01013 activation_gradients[index+1], 01014 expectation_gradients[index+1]); 01015 01016 connections[ index ]->bpropUpdate( 01017 previous_input_representation, 01018 greedy_activation.subVec(0,layers[index+1]->size), 01019 expectation_gradients[index], 01020 activation_gradients[index+1]); 01021 } 01022 01023 // RBM updates 01024 01025 if( !fast_exact_is_equal( cd_learning_rate, 0 ) ) 01026 { 01027 if( !fast_exact_is_equal( cd_decrease_ct , 0 ) ) 01028 lr = cd_learning_rate/(1 + cd_decrease_ct 01029 * this_stage); 01030 else 01031 lr = cd_learning_rate; 01032 01033 layers[index]->setLearningRate( lr ); 01034 greedy_connections[index]->setLearningRate( lr ); 01035 greedy_layers[index]->setLearningRate( lr ); 01036 01037 layers[index]->update( pos_down_val, neg_down_val ); 01038 greedy_connections[index]->update( pos_down_val, pos_up_val, 01039 neg_down_val, neg_up_val ); 01040 greedy_layers[index]->update( pos_up_val, neg_up_val ); 01041 } 01042 } 01043 01044 void StackedFocusedAutoassociatorsNet::fineTuningStep( 01045 const Vec& input, const Vec& target, 01046 Vec& train_costs, Vec similar_example, Vec dissimilar_example ) 01047 { 01048 train_set_representations_up_to_date = false; 01049 01050 if( !do_not_use_knn_classifier ) 01051 { 01052 // Get similar example representation 01053 01054 computeRepresentation(similar_example, similar_example_representation, 01055 n_layers-1); 01056 01057 // Get dissimilar example representation 01058 01059 computeRepresentation(dissimilar_example, dissimilar_example_representation, 01060 n_layers-1); 01061 } 01062 01063 // Get example representation 01064 01065 computeRepresentation(input, previous_input_representation, 01066 n_layers-1); 01067 01068 // Compute supervised gradient 01069 01070 01071 if( !do_not_use_knn_classifier ) 01072 { 01073 // Similar example contribution 01074 substract(previous_input_representation,similar_example_representation, 01075 expectation_gradients[n_layers-1]); 01076 expectation_gradients[n_layers-1] *= 4/sqrt((real)layers[n_layers-1]->size); 01077 01078 train_costs[train_costs.length()-3] = 01079 2 * sqrt(powdistance(previous_input_representation, 01080 similar_example_representation, 01081 2)) / sqrt((real)layers[n_layers-1]->size); 01082 01083 // Dissimilar example contribution 01084 real dist = sqrt(powdistance(previous_input_representation, 01085 dissimilar_example_representation, 01086 2)); 01087 01088 train_costs[train_costs.length()-2] = 01089 2 * sqrt((real)layers[n_layers-1]->size) * safeexp( -dissimilar_example_cost_precision 01090 *dist/sqrt((real)layers[n_layers-1]->size)); 01091 train_costs.last() = train_costs[train_costs.length()-3] + 01092 train_costs[train_costs.length()-2]; 01093 //if( dist == 0 ) 01094 // PLWARNING("StackedFocusedAutoassociatorsNet::fineTuningStep(): dissimilar" 01095 // " example representation is exactly the sample as the" 01096 // " input example. Gradient would be infinite! Skipping this" 01097 // " example..."); 01098 //else 01099 //{ 01100 01101 substract(previous_input_representation, 01102 dissimilar_example_representation, 01103 dissimilar_gradient_contribution); 01104 01105 dissimilar_gradient_contribution *= -2 * dissimilar_example_cost_precision* 01106 safeexp(-dissimilar_example_cost_precision*dist/sqrt((real)layers[n_layers-1]->size)); 01107 01108 expectation_gradients[n_layers-1] += dissimilar_gradient_contribution; 01109 //} 01110 } 01111 else 01112 { 01113 final_module->fprop( previous_input_representation, final_cost_input ); 01114 final_cost->fprop( final_cost_input, target, final_cost_value ); 01115 01116 final_cost->bpropUpdate( final_cost_input, target, 01117 final_cost_value[0], 01118 final_cost_gradient ); 01119 final_module->bpropUpdate( previous_input_representation, 01120 final_cost_input, 01121 expectation_gradients[ n_layers-1 ], 01122 final_cost_gradient ); 01123 } 01124 01125 for( int i=n_layers-1 ; i>0 ; i-- ) 01126 { 01127 layers[i]->bpropUpdate( activations[i], 01128 expectations[i], 01129 activation_gradients[i], 01130 expectation_gradients[i] ); 01131 01132 01133 connections[i-1]->bpropUpdate( expectations[i-1], 01134 activations[i], 01135 expectation_gradients[i-1], 01136 activation_gradients[i] ); 01137 } 01138 } 01139 01140 void StackedFocusedAutoassociatorsNet::computeRepresentation(const Vec& input, 01141 Vec& representation, 01142 int layer) const 01143 { 01144 if(layer == 0) 01145 { 01146 representation.resize(input.length()); 01147 expectations[0] << input; 01148 representation << input; 01149 return; 01150 } 01151 01152 expectations[0] << input; 01153 for( int i=0 ; i<layer; i++ ) 01154 { 01155 connections[i]->fprop( expectations[i], activations[i+1] ); 01156 layers[i+1]->fprop(activations[i+1],expectations[i+1]); 01157 } 01158 representation.resize(expectations[layer].length()); 01159 representation << expectations[layer]; 01160 } 01161 01162 void StackedFocusedAutoassociatorsNet::computeOutput(const Vec& input, Vec& output) const 01163 { 01164 if( do_not_use_knn_classifier & currently_trained_layer>n_layers-1 ) 01165 { 01166 computeRepresentation(input,input_representation, 01167 min(currently_trained_layer,n_layers-1)); 01168 final_module->fprop( input_representation, final_cost_input ); 01169 output[0] = argmax(final_cost_input); 01170 } 01171 else 01172 { 01173 updateTrainSetRepresentations(); 01174 01175 computeRepresentation(input,input_representation, 01176 min(currently_trained_layer,n_layers-1)); 01177 01178 computeNearestNeighbors(train_set_representations_vmat,input_representation, 01179 test_nearest_neighbors_indices); 01180 01181 test_votes.clear(); 01182 for(int i=0; i<test_nearest_neighbors_indices.length(); i++) 01183 test_votes[train_set_targets[test_nearest_neighbors_indices[i]]]++; 01184 01185 output[0] = argmax(test_votes); 01186 } 01187 } 01188 01189 void StackedFocusedAutoassociatorsNet::computeCostsFromOutputs(const Vec& input, const Vec& output, 01190 const Vec& target, Vec& costs) const 01191 { 01192 01193 //Assumes that computeOutput has been called 01194 01195 costs.resize( getTestCostNames().length() ); 01196 costs.fill( MISSING_VALUE ); 01197 01198 if( currently_trained_layer<n_layers 01199 && reconstruction_connections.length() != 0 ) 01200 { 01201 greedy_connections[currently_trained_layer-1]->fprop( 01202 expectations[currently_trained_layer-1], 01203 greedy_activation); 01204 01205 greedy_layers[currently_trained_layer-1]->fprop(greedy_activation, 01206 greedy_expectation); 01207 01208 reconstruction_connections[ currently_trained_layer-1 ]->fprop( 01209 greedy_expectation, 01210 reconstruction_activations); 01211 layers[ currently_trained_layer-1 ]->fprop( 01212 reconstruction_activations, 01213 layers[ currently_trained_layer-1 ]->expectation); 01214 01215 layers[ currently_trained_layer-1 ]->activation << 01216 reconstruction_activations; 01217 layers[ currently_trained_layer-1 ]->setExpectationByRef( 01218 layers[ currently_trained_layer-1 ]->expectation); 01219 costs[ currently_trained_layer-1 ] = 01220 layers[ currently_trained_layer-1 ]->fpropNLL( 01221 expectations[currently_trained_layer-1]); 01222 } 01223 01224 if( ((int)round(output[0])) == ((int)round(target[0])) ) 01225 costs[n_layers-1] = 0; 01226 else 01227 costs[n_layers-1] = 1; 01228 } 01229 01231 // test // 01233 void StackedFocusedAutoassociatorsNet::updateTrainSetRepresentations() const 01234 { 01235 if(!train_set_representations_up_to_date) 01236 { 01237 // Precompute training set examples' representation 01238 int l = min(currently_trained_layer,n_layers-1); 01239 Vec input( inputsize() ); 01240 Vec target( targetsize() ); 01241 Vec train_set_representation; 01242 real weight; 01243 01244 train_set_representations.resize(train_set->length(), layers[l]->size); 01245 train_set_targets.resize(train_set->length()); 01246 01247 for(int i=0; i<train_set->length(); i++) 01248 { 01249 train_set->getExample(i,input,target,weight); 01250 computeRepresentation(input,train_set_representation,l); 01251 train_set_representations(i) << train_set_representation; 01252 train_set_targets[i] = (int)round(target[0]); 01253 } 01254 train_set_representations_vmat = VMat(train_set_representations); 01255 01256 train_set_representations_up_to_date = true; 01257 } 01258 } 01259 01260 TVec<string> StackedFocusedAutoassociatorsNet::getTestCostNames() const 01261 { 01262 // Return the names of the costs computed by computeCostsFromOutputs 01263 // (these may or may not be exactly the same as what's returned by 01264 // getTrainCostNames). 01265 01266 TVec<string> cost_names(0); 01267 01268 for( int i=0; i<layers.size()-1; i++) 01269 cost_names.push_back("reconstruction_error_" + tostring(i+1)); 01270 01271 cost_names.append( "class_error" ); 01272 01273 return cost_names; 01274 } 01275 01276 TVec<string> StackedFocusedAutoassociatorsNet::getTrainCostNames() const 01277 { 01278 TVec<string> cost_names = getTestCostNames(); 01279 cost_names.push_back("similarity_cost"); 01280 cost_names.push_back("dissimilarity_cost"); 01281 cost_names.push_back("metric_cost"); 01282 return cost_names; 01283 } 01284 01285 void StackedFocusedAutoassociatorsNet::setTrainingSet(VMat training_set, bool call_forget) 01286 { 01287 inherited::setTrainingSet(training_set,call_forget); 01288 01289 train_set_representations_up_to_date = false; 01290 01291 if( do_not_use_knn_classifier && fast_exact_is_equal( supervised_greedy_learning_rate, 0 ) ) 01292 return; 01293 Vec input( inputsize() ); 01294 Vec target( targetsize() ); 01295 real weight; // unused 01296 01297 // Separate classes 01298 class_datasets.resize(n_classes); 01299 for(int k=0; k<n_classes; k++) 01300 { 01301 class_datasets[k] = new ClassSubsetVMatrix(); 01302 class_datasets[k]->classes.resize(1); 01303 class_datasets[k]->classes[0] = k; 01304 class_datasets[k]->source = training_set; 01305 class_datasets[k]->build(); 01306 } 01307 01308 // Find other classes proportions 01309 other_classes_proportions.resize(n_classes,n_classes); 01310 other_classes_proportions.fill(0); 01311 for(int k=0; k<n_classes; k++) 01312 { 01313 real sum = 0; 01314 for(int j=0; j<n_classes; j++) 01315 { 01316 if(j==k) continue; 01317 other_classes_proportions(k,j) = class_datasets[j]->length(); 01318 sum += class_datasets[j]->length(); 01319 } 01320 other_classes_proportions(k) /= sum; 01321 } 01322 01323 // Find training nearest neighbors 01324 input.resize(training_set->inputsize()); 01325 target.resize(training_set->targetsize()); 01326 nearest_neighbors_indices.resize(training_set->length(), k_neighbors); 01327 TVec<int> nearest_neighbors_indices_row; 01328 for(int k=0; k<n_classes; k++) 01329 { 01330 for(int i=0; i<class_datasets[k]->length(); i++) 01331 { 01332 class_datasets[k]->getExample(i,input,target,weight); 01333 nearest_neighbors_indices_row = nearest_neighbors_indices( 01334 class_datasets[k]->indices[i]); 01335 computeNearestNeighbors( 01336 new GetInputVMatrix((VMatrix *)class_datasets[k]),input, 01337 nearest_neighbors_indices_row, 01338 i); 01339 } 01340 } 01341 } 01342 01343 01344 //##### Helper functions ################################################## 01345 01346 void StackedFocusedAutoassociatorsNet::setLearningRate( real the_learning_rate ) 01347 { 01348 for( int i=0 ; i<n_layers-1 ; i++ ) 01349 { 01350 layers[i]->setLearningRate( the_learning_rate ); 01351 connections[i]->setLearningRate( the_learning_rate ); 01352 } 01353 layers[n_layers-1]->setLearningRate( the_learning_rate ); 01354 01355 if( do_not_use_knn_classifier ) 01356 { 01357 final_module->setLearningRate( the_learning_rate ); 01358 final_cost->setLearningRate( the_learning_rate ); 01359 } 01360 } 01361 01362 01363 } // end of namespace PLearn 01364 01365 01366 /* 01367 Local Variables: 01368 mode:c++ 01369 c-basic-offset:4 01370 c-file-style:"stroustrup" 01371 c-file-offsets:((innamespace . 0)(inline-open . 0)) 01372 indent-tabs-mode:nil 01373 fill-column:79 01374 End: 01375 */ 01376 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :