// -*- C++ -*-

// DeepNonLocalManifoldParzen.cc
//
// Copyright (C) 2007 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#define PL_LOG_MODULE_NAME "DeepNonLocalManifoldParzen"
#include <plearn/io/pl_log.h>

#include "DeepNonLocalManifoldParzen.h"
#include <plearn/vmat/VMat_computeNearestNeighbors.h>
#include <plearn/vmat/GetInputVMatrix.h>
#include <plearn_learners/online/GradNNetLayerModule.h>
#include <plearn/math/plapack.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    DeepNonLocalManifoldParzen,
    "Neural net, trained layer-wise to predict the manifold structure of the data.",
    "This information is used in a Manifold Parzen Windows classifier."
);
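// A minimal usage sketch, assuming PLearn's usual object-specification
// script syntax. All option values below are illustrative only, and the
// layers/connections specifications are elided:
//
//     DeepNonLocalManifoldParzen(
//         n_classes = 10;
//         k_neighbors = 5;
//         n_components = 3;
//         min_sigma_noise = 1e-5;
//         training_schedule = [ 10000 10000 ];
//         nstages = 50000;
//         greedy_learning_rate = 0.01;
//         fine_tuning_learning_rate = 0.001;
//         layers = [ ... ];                      # input + hidden layers
//         connections = [ ... ];
//         reconstruction_connections = [ ... ];
//     )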
DeepNonLocalManifoldParzen::DeepNonLocalManifoldParzen() :
    cd_learning_rate( 0. ),
    cd_decrease_ct( 0. ),
    greedy_learning_rate( 0. ),
    greedy_decrease_ct( 0. ),
    fine_tuning_learning_rate( 0. ),
    fine_tuning_decrease_ct( 0. ),
    k_neighbors( 1 ),
    n_components( 1 ),
    min_sigma_noise( 0 ),
    n_classes( -1 ),
    train_one_network_per_class( false ),
    output_connections_l1_penalty_factor( 0 ),
    output_connections_l2_penalty_factor( 0 ),
    save_manifold_parzen_parameters( false ),
    do_not_learn_sigma_noise( false ),
    n_layers( 0 ),
    currently_trained_layer( 0 ),
    manifold_parzen_parameters_are_up_to_date( false )
{
    // random_gen will be initialized in PLearner::build_()
    random_gen = new PRandom();
}

void DeepNonLocalManifoldParzen::declareOptions(OptionList& ol)
{
    declareOption(ol, "cd_learning_rate",
                  &DeepNonLocalManifoldParzen::cd_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the RBM "
                  "contrastive divergence training.\n");

    declareOption(ol, "cd_decrease_ct",
                  &DeepNonLocalManifoldParzen::cd_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "the RBMs contrastive\n"
                  "divergence training. When a hidden layer has finished "
                  "its training,\n"
                  "the learning rate is reset to its initial value.\n");

    declareOption(ol, "greedy_learning_rate",
                  &DeepNonLocalManifoldParzen::greedy_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the autoassociator "
                  "gradient descent training.\n");

    declareOption(ol, "greedy_decrease_ct",
                  &DeepNonLocalManifoldParzen::greedy_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "the autoassociator\n"
                  "gradient descent training. When a hidden layer has finished "
                  "its training,\n"
                  "the learning rate is reset to its initial value.\n");

    declareOption(ol, "fine_tuning_learning_rate",
                  &DeepNonLocalManifoldParzen::fine_tuning_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the fine tuning gradient descent.\n");

    declareOption(ol, "fine_tuning_decrease_ct",
                  &DeepNonLocalManifoldParzen::fine_tuning_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "fine tuning\n"
                  "gradient descent.\n");

    declareOption(ol, "training_schedule",
                  &DeepNonLocalManifoldParzen::training_schedule,
                  OptionBase::buildoption,
                  "Number of examples to use during each phase of greedy pre-training.\n"
                  "The number of fine-tuning steps is defined by nstages.\n"
        );
    declareOption(ol, "layers", &DeepNonLocalManifoldParzen::layers,
                  OptionBase::buildoption,
                  "The layers of units in the network. The first element\n"
                  "of this vector should be the input layer and the\n"
                  "subsequent elements should be the hidden layers. The\n"
                  "output layer should not be included in layers.\n");

    declareOption(ol, "connections", &DeepNonLocalManifoldParzen::connections,
                  OptionBase::buildoption,
                  "The weights of the connections between the layers.\n");

    declareOption(ol, "reconstruction_connections",
                  &DeepNonLocalManifoldParzen::reconstruction_connections,
                  OptionBase::buildoption,
                  "The reconstruction weights of the autoassociators.\n");

    declareOption(ol, "k_neighbors",
                  &DeepNonLocalManifoldParzen::k_neighbors,
                  OptionBase::buildoption,
                  "Number of nearest neighbors to use to learn "
                  "the manifold structure.\n");

    declareOption(ol, "n_components",
                  &DeepNonLocalManifoldParzen::n_components,
                  OptionBase::buildoption,
                  "Dimensionality of the manifold.\n");

    declareOption(ol, "min_sigma_noise",
                  &DeepNonLocalManifoldParzen::min_sigma_noise,
                  OptionBase::buildoption,
                  "Minimum value for the noise variance.\n");

    declareOption(ol, "n_classes",
                  &DeepNonLocalManifoldParzen::n_classes,
                  OptionBase::buildoption,
                  "Number of classes. If n_classes = 1, learner will output\n"
                  "log likelihood of a given input. If n_classes > 1,\n"
                  "classification will be performed.\n");

    declareOption(ol, "train_one_network_per_class",
                  &DeepNonLocalManifoldParzen::train_one_network_per_class,
                  OptionBase::buildoption,
                  "Indication that one network per class should be trained.\n");

    declareOption(ol, "output_connections_l1_penalty_factor",
                  &DeepNonLocalManifoldParzen::output_connections_l1_penalty_factor,
                  OptionBase::buildoption,
                  "Output weights L1 penalty factor.\n");

    declareOption(ol, "output_connections_l2_penalty_factor",
                  &DeepNonLocalManifoldParzen::output_connections_l2_penalty_factor,
                  OptionBase::buildoption,
                  "Output weights L2 penalty factor.\n");

    declareOption(ol, "save_manifold_parzen_parameters",
                  &DeepNonLocalManifoldParzen::save_manifold_parzen_parameters,
                  OptionBase::buildoption,
                  "Indication that the parameters for the manifold parzen\n"
                  "windows estimator should be saved during test, to speed up "
                  "testing.\n");

    declareOption(ol, "do_not_learn_sigma_noise",
                  &DeepNonLocalManifoldParzen::do_not_learn_sigma_noise,
                  OptionBase::buildoption,
                  "Indication that the value of sigma noise should not be learned.\n");

    declareOption(ol, "use_test_centric_nlmp",
                  &DeepNonLocalManifoldParzen::use_test_centric_nlmp,
                  OptionBase::buildoption,
                  "Indication that the Test-Centric NLMP variant should "
                  "be used.\n"
                  "In this case, train_one_network_per_class must be true.\n");

    declareOption(ol, "greedy_stages",
                  &DeepNonLocalManifoldParzen::greedy_stages,
                  OptionBase::learntoption,
                  "Number of training samples seen in the different greedy "
                  "phases.\n"
        );

    declareOption(ol, "n_layers", &DeepNonLocalManifoldParzen::n_layers,
                  OptionBase::learntoption,
                  "Number of layers.\n"
        );

    declareOption(ol, "output_connections",
                  &DeepNonLocalManifoldParzen::output_connections,
                  OptionBase::learntoption,
                  "Output weights.\n"
        );

    declareOption(ol, "train_set",
                  &DeepNonLocalManifoldParzen::train_set,
                  OptionBase::learntoption,
                  "Training set.\n"
        );
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void DeepNonLocalManifoldParzen::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of
    // ###    all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning"
    // ###    options have been modified.
    // ### You should assume that the parent class' build_() has already been
    // ### called.

    MODULE_LOG << "build_() called" << endl;

    if(inputsize_ > 0 )
    {
        // Initialize some learnt variables
        n_layers = layers.length();

        // Builds some variables using the training set
        setTrainingSet(train_set, false);

        if( n_classes <= 0 )
            PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                    "n_classes should be > 0.\n");
        test_votes.resize(n_classes);

        if( k_neighbors <= 0 )
            PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                    "k_neighbors should be > 0.\n");

        if( weightsize_ > 0 )
            PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                    "usage of weighted samples (weight size > 0) is not\n"
                    "implemented yet.\n");

        if( training_schedule.length() != n_layers-1 )
            PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                    "training_schedule should have %d elements.\n",
                    n_layers-1);

        if( n_components < 1 || n_components > inputsize_)
            PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                    "n_components should be > 0 and <= inputsize.\n");

        if( min_sigma_noise < 0)
            PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                    "min_sigma_noise should be >= 0.\n");

        if( use_test_centric_nlmp && !train_one_network_per_class )
            PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                    "train_one_network_per_class must be true for "
                    "Test-Centric NLMP variant.\n");

        if( use_test_centric_nlmp && n_classes <= 1)
            PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                    "n_classes must be > 1 for "
                    "Test-Centric NLMP variant.\n");

        if(greedy_stages.length() == 0)
        {
            greedy_stages.resize(n_layers-1);
            greedy_stages.clear();
        }

        if(stage > 0)
            currently_trained_layer = n_layers;
        else
        {
            currently_trained_layer = n_layers-1;
            while(currently_trained_layer>1
                  && greedy_stages[currently_trained_layer-1] <= 0)
                currently_trained_layer--;
        }

        build_layers_and_connections();

        if( train_one_network_per_class )
        {
            if( n_classes == 1 )
                PLERROR("DeepNonLocalManifoldParzen::build_() - \n"
                        "train_one_network_per_class is useless for\n"
                        "n_classes == 1.\n");
            if( all_layers.length() != n_classes )
            {
                all_layers.resize( n_classes);
                for( int i=0; i<all_layers.length(); i++ )
                {
                    CopiesMap copies;
                    all_layers[i] = layers->deepCopy(copies);
                }
            }
            if( all_connections.length() != n_classes )
            {
                all_connections.resize( n_classes);
                for( int i=0; i<all_connections.length(); i++ )
                {
                    CopiesMap copies;
                    all_connections[i] = connections->deepCopy(copies);
                }
            }
            if( all_reconstruction_connections.length() != n_classes )
            {
                all_reconstruction_connections.resize( n_classes);
                for( int i=0; i<all_reconstruction_connections.length(); i++ )
                {
                    CopiesMap copies;
                    all_reconstruction_connections[i] =
                        reconstruction_connections->deepCopy(copies);
                }
            }
            if( all_output_connections.length() != n_classes )
            {
                all_output_connections.resize( n_classes);
                for( int i=0; i<all_output_connections.length(); i++ )
                {
                    CopiesMap copies;
                    all_output_connections[i] =
                        output_connections->deepCopy(copies);
                }
            }
        }
    }
}

void DeepNonLocalManifoldParzen::build_layers_and_connections()
{
    MODULE_LOG << "build_layers_and_connections() called" << endl;

    if( connections.length() != n_layers-1 )
        PLERROR("DeepNonLocalManifoldParzen::build_layers_and_connections() - \n"
                "there should be %d connections.\n",
                n_layers-1);

    if( !fast_exact_is_equal( greedy_learning_rate, 0 )
        && reconstruction_connections.length() != n_layers-1 )
        PLERROR("DeepNonLocalManifoldParzen::build_layers_and_connections() - \n"
                "there should be %d reconstruction connections.\n",
                n_layers-1);

    if( !( reconstruction_connections.length() == 0
           || reconstruction_connections.length() == n_layers-1 ) )
        PLERROR("DeepNonLocalManifoldParzen::build_layers_and_connections() - \n"
                "there should be either 0 or %d reconstruction connections.\n",
                n_layers-1);

    if(layers[0]->size != inputsize_)
        PLERROR("DeepNonLocalManifoldParzen::build_layers_and_connections() - \n"
                "layers[0] should have a size of %d.\n",
                inputsize_);

    activations.resize( n_layers );
    expectations.resize( n_layers );
    activation_gradients.resize( n_layers );
    expectation_gradients.resize( n_layers );

    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        if( layers[i]->size != connections[i]->down_size )
            PLERROR("DeepNonLocalManifoldParzen::build_layers_and_connections() "
                    "- \n"
                    "connections[%i] should have a down_size of %d.\n",
                    i, layers[i]->size);

        if( connections[i]->up_size != layers[i+1]->size )
            PLERROR("DeepNonLocalManifoldParzen::build_layers_and_connections() "
                    "- \n"
                    "connections[%i] should have an up_size of %d.\n",
                    i, layers[i+1]->size);

        if( !(layers[i]->random_gen) )
        {
            layers[i]->random_gen = random_gen;
            layers[i]->forget();
        }

        if( !(connections[i]->random_gen) )
        {
            connections[i]->random_gen = random_gen;
            connections[i]->forget();
        }

        if( reconstruction_connections.length() != 0
            && !(reconstruction_connections[i]->random_gen) )
        {
            reconstruction_connections[i]->random_gen = random_gen;
            reconstruction_connections[i]->forget();
        }

        activations[i].resize( layers[i]->size );
        expectations[i].resize( layers[i]->size );
        activation_gradients[i].resize( layers[i]->size );
        expectation_gradients[i].resize( layers[i]->size );
    }

    if( !(layers[n_layers-1]->random_gen) )
    {
        layers[n_layers-1]->random_gen = random_gen;
        layers[n_layers-1]->forget();
    }
    activations[n_layers-1].resize( layers[n_layers-1]->size );
    expectations[n_layers-1].resize( layers[n_layers-1]->size );
    activation_gradients[n_layers-1].resize( layers[n_layers-1]->size );
    expectation_gradients[n_layers-1].resize( layers[n_layers-1]->size );
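    // The output module produces, for a given top-layer representation, all
    // parameters of the local Gaussian: n_components*inputsize() values for
    // the basis F (one tangent vector per row), inputsize() values for the
    // offset mu and, unless do_not_learn_sigma_noise is set, one extra value
    // for pre_sigma_noise (squared and shifted by min_sigma_noise to obtain
    // the noise variance). See computeManifoldParzenParameters() below.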
    int output_size = n_components*inputsize() + inputsize()
        + (do_not_learn_sigma_noise ? 0 : 1);
    all_outputs.resize( output_size );

    if( !output_connections || output_connections->output_size != output_size)
    {
        PP<GradNNetLayerModule> ow = new GradNNetLayerModule;
        ow->input_size = layers[n_layers-1]->size;
        ow->output_size = output_size;
        ow->L1_penalty_factor = output_connections_l1_penalty_factor;
        ow->L2_penalty_factor = output_connections_l2_penalty_factor;
        ow->random_gen = random_gen;
        ow->build();
        output_connections = ow;
    }

    if( !(output_connections->random_gen) )
    {
        output_connections->random_gen = random_gen;
        output_connections->forget();
    }
}

// ### Nothing to add here, simply calls build_
void DeepNonLocalManifoldParzen::build()
{
    inherited::build();
    build_();
}


void DeepNonLocalManifoldParzen::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // deepCopyField(, copies);

    // Public options
    deepCopyField(training_schedule, copies);
    deepCopyField(layers, copies);
    deepCopyField(connections, copies);
    deepCopyField(reconstruction_connections, copies);

    // Protected options
    deepCopyField(activations, copies);
    deepCopyField(expectations, copies);
    deepCopyField(activation_gradients, copies);
    deepCopyField(expectation_gradients, copies);
    deepCopyField(reconstruction_activations, copies);
    deepCopyField(reconstruction_activation_gradients, copies);
    deepCopyField(reconstruction_expectation_gradients, copies);
    deepCopyField(output_connections, copies);
    deepCopyField(all_layers, copies);
    deepCopyField(all_connections, copies);
    deepCopyField(all_reconstruction_connections, copies);
    deepCopyField(all_output_connections, copies);
    deepCopyField(input_representation, copies);
    deepCopyField(previous_input_representation, copies);
    deepCopyField(all_outputs, copies);
    deepCopyField(all_outputs_gradient, copies);
    deepCopyField(F, copies);
    deepCopyField(F_copy, copies);
    deepCopyField(mu, copies);
    deepCopyField(pre_sigma_noise, copies);
    deepCopyField(Ut, copies);
    deepCopyField(U, copies);
    deepCopyField(V, copies);
    deepCopyField(z, copies);
    deepCopyField(inv_Sigma_F, copies);
    deepCopyField(inv_Sigma_z, copies);
    deepCopyField(temp_ncomp, copies);
    deepCopyField(diff_neighbor_input, copies);
    deepCopyField(sm_svd, copies);
    deepCopyField(S, copies);
    deepCopyField(uk, copies);
    deepCopyField(fk, copies);
    deepCopyField(uk2, copies);
    deepCopyField(inv_sigma_zj, copies);
    deepCopyField(zj, copies);
    deepCopyField(inv_sigma_fk, copies);
    deepCopyField(diff, copies);
    deepCopyField(pos_down_val, copies);
    deepCopyField(pos_up_val, copies);
    deepCopyField(neg_down_val, copies);
    deepCopyField(neg_up_val, copies);
    deepCopyField(eigenvectors, copies);
    deepCopyField(eigenvalues, copies);
    deepCopyField(sigma_noises, copies);
    deepCopyField(mus, copies);
    deepCopyField(class_datasets, copies);
    deepCopyField(nearest_neighbors_indices, copies);
    deepCopyField(test_votes, copies);
    deepCopyField(greedy_stages, copies);
}


int DeepNonLocalManifoldParzen::outputsize() const
{
    //if(currently_trained_layer < n_layers)
    //    return layers[currently_trained_layer]->size;
    //return layers[n_layers-1]->size;
    return 1;
}

void DeepNonLocalManifoldParzen::forget()
{
    inherited::forget();

    manifold_parzen_parameters_are_up_to_date = false;

    if( train_one_network_per_class )
    {
        for(int c = 0; c<n_classes; c++ )
        {
            for( int i=0 ; i<n_layers-1 ; i++ )
                all_connections[c][i]->forget();

            for( int i=0 ; i<n_layers ; i++ )
                all_layers[c][i]->forget();

            for( int i=0; i<all_reconstruction_connections[c].length(); i++)
                all_reconstruction_connections[c][i]->forget();

            if( all_output_connections[c] )
                all_output_connections[c]->forget();
        }
    }
    else
    {
        for( int i=0 ; i<n_layers-1 ; i++ )
            connections[i]->forget();

        for( int i=0 ; i<n_layers ; i++ )
            layers[i]->forget();

        for( int i=0; i<reconstruction_connections.length(); i++)
            reconstruction_connections[i]->forget();

        if( output_connections )
            output_connections->forget();
    }

    stage = 0;
    greedy_stages.clear();
}
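// Training proceeds in two phases: (1) greedy layer-wise training of each
// hidden layer, by autoassociator gradient descent (greedy_learning_rate)
// and/or RBM contrastive divergence (cd_learning_rate), each layer for
// training_schedule[i] examples; (2) fine-tuning of all parameters by
// gradient descent on the negative log-likelihood of each training example's
// k_neighbors nearest (same-class) neighbors, for nstages steps.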
void DeepNonLocalManifoldParzen::train()
{
    MODULE_LOG << "train() called " << endl;
    MODULE_LOG << "  training_schedule = " << training_schedule << endl;

    Vec input( inputsize() );
    Vec nearest_neighbor( inputsize() );
    Mat nearest_neighbors( k_neighbors, inputsize() );
    Vec target( targetsize() );
    Vec target2( targetsize() );
    real weight; // unused
    real weight2; // unused

    TVec<string> train_cost_names = getTrainCostNames() ;
    Vec train_costs( train_cost_names.length() );
    train_costs.fill(MISSING_VALUE) ;

    int nsamples = train_set->length();
    int sample;

    PP<ProgressBar> pb;

    // clear stats of previous epoch
    train_stats->forget();

    int init_stage;

    /***** initial greedy training *****/
    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        MODULE_LOG << "Training connection weights between layers " << i
                   << " and " << i+1 << endl;

        int end_stage = training_schedule[i];
        int* this_stage = greedy_stages.subVec(i,1).data();
        init_stage = *this_stage;

        MODULE_LOG << "  stage = " << *this_stage << endl;
        MODULE_LOG << "  end_stage = " << end_stage << endl;
        MODULE_LOG << "  greedy_learning_rate = " << greedy_learning_rate << endl;

        if( report_progress && *this_stage < end_stage )
            pb = new ProgressBar( "Training layer "+tostring(i)
                                  +" of "+classname(),
                                  end_stage - init_stage );

        train_costs.fill(MISSING_VALUE);
        reconstruction_activations.resize(layers[i]->size);
        reconstruction_activation_gradients.resize(layers[i]->size);
        reconstruction_expectation_gradients.resize(layers[i]->size);

        pos_down_val.resize(layers[i]->size);
        pos_up_val.resize(layers[i+1]->size);
        neg_down_val.resize(layers[i]->size);
        neg_up_val.resize(layers[i+1]->size);

        for( ; *this_stage<end_stage ; (*this_stage)++ )
        {
            sample = *this_stage % nsamples;
            train_set->getExample(sample, input, target, weight);

            if( train_one_network_per_class )
            {
                int c = (int) target[0];
                layers = all_layers[c];
                connections = all_connections[c];
                reconstruction_connections = all_reconstruction_connections[c];
                output_connections = all_output_connections[c];
            }
            greedyStep( input, target, i, train_costs, *this_stage);
            train_stats->update( train_costs );

            if( pb )
                pb->update( *this_stage - init_stage + 1 );
        }
    }

    /***** fine-tuning by gradient descent *****/
    if( stage < nstages )
    {
        if( stage == 0 )
        {
            MODULE_LOG << "Finding the nearest neighbors" << endl;
            // Find training nearest neighbors
            TVec<int> nearest_neighbors_indices_row;
            nearest_neighbors_indices.resize(train_set->length(), k_neighbors);
            if( n_classes > 1 )
                for(int k=0; k<n_classes; k++)
                {
                    for(int i=0; i<class_datasets[k]->length(); i++)
                    {
                        class_datasets[k]->getExample(i,input,target,weight);
                        nearest_neighbors_indices_row = nearest_neighbors_indices(
                            class_datasets[k]->indices[i]);

                        computeNearestNeighbors(
                            new GetInputVMatrix((VMatrix *)class_datasets[k]),input,
                            nearest_neighbors_indices_row,
                            i);
                    }
                }
            else
                for(int i=0; i<train_set->length(); i++)
                {
                    train_set->getExample(i,input,target,weight);
                    nearest_neighbors_indices_row = nearest_neighbors_indices(i);
                    computeNearestNeighbors(
                        train_set,input,
                        nearest_neighbors_indices_row,
                        i);
                }
        }

        MODULE_LOG << "Fine-tuning all parameters, by gradient descent" << endl;
        MODULE_LOG << "  stage = " << stage << endl;
        MODULE_LOG << "  nstages = " << nstages << endl;
        MODULE_LOG << "  fine_tuning_learning_rate = " <<
            fine_tuning_learning_rate << endl;

        init_stage = stage;
        if( report_progress && stage < nstages )
            pb = new ProgressBar( "Fine-tuning parameters of all layers of "
                                  + classname(),
                                  nstages - init_stage );

        train_costs.fill(MISSING_VALUE);
        for( ; stage<nstages ; stage++ )
        {
            sample = stage % nsamples;
            train_set->getExample( sample, input, target, weight );

            // Find nearest neighbors
            if( n_classes > 1 )
                for( int k=0; k<k_neighbors; k++ )
                {
                    class_datasets[(int)round(target[0])]->getExample(
                        nearest_neighbors_indices(sample,k),
                        nearest_neighbor, target2, weight2);

                    if(round(target[0]) != round(target2[0]))
                        PLERROR("DeepNonLocalManifoldParzen::train(): similar"
                                " example is not from same class!");
                    nearest_neighbors(k) << nearest_neighbor;
                }
            else
                for( int k=0; k<k_neighbors; k++ )
                {
                    train_set->getExample(
                        nearest_neighbors_indices(sample,k),
                        nearest_neighbor, target2, weight2);
                    nearest_neighbors(k) << nearest_neighbor;
                }

            if( train_one_network_per_class )
            {
                int c = (int) target[0];
                layers = all_layers[c];
                connections = all_connections[c];
                reconstruction_connections = all_reconstruction_connections[c];
                output_connections = all_output_connections[c];
            }

            if( !fast_exact_is_equal( fine_tuning_decrease_ct, 0. ) )
                setLearningRate( fine_tuning_learning_rate
                                 / (1. + fine_tuning_decrease_ct * stage ) );
            else
                setLearningRate( fine_tuning_learning_rate );

            fineTuningStep( input, target, train_costs,
                            nearest_neighbors);
            train_stats->update( train_costs );

            if( pb )
                pb->update( stage - init_stage + 1 );
        }
    }

    train_stats->finalize();
    MODULE_LOG << "  train costs = " << train_stats->getMean() << endl;

    // Update currently_trained_layer
    if(stage > 0)
        currently_trained_layer = n_layers;
    else
    {
        currently_trained_layer = n_layers-1;
        while(currently_trained_layer>1
              && greedy_stages[currently_trained_layer-1] <= 0)
            currently_trained_layer--;
    }
}

void DeepNonLocalManifoldParzen::greedyStep(
    const Vec& input, const Vec& target, int index,
    Vec train_costs, int this_stage)
{
    PLASSERT( index < n_layers );
    real lr;
    manifold_parzen_parameters_are_up_to_date = false;

    // Get example representation
    computeRepresentation(input, previous_input_representation,
                          index);
    connections[index]->fprop(previous_input_representation,
                              activations[index+1]);
    layers[index+1]->fprop(activations[index+1],
                           expectations[index+1]);

    // Autoassociator learning
    if( !fast_exact_is_equal( greedy_learning_rate, 0 ) )
    {
        if( !fast_exact_is_equal( greedy_decrease_ct , 0 ) )
            lr = greedy_learning_rate/(1 + greedy_decrease_ct
                                       * this_stage);
        else
            lr = greedy_learning_rate;

        layers[index]->setLearningRate( lr );
        connections[index]->setLearningRate( lr );
        reconstruction_connections[index]->setLearningRate( lr );
        layers[index+1]->setLearningRate( lr );

        reconstruction_connections[ index ]->fprop( expectations[index+1],
                                                    reconstruction_activations);
        layers[ index ]->fprop( reconstruction_activations,
                                layers[ index ]->expectation);

        layers[ index ]->activation << reconstruction_activations;
        layers[ index ]->setExpectationByRef(layers[ index ]->expectation);
        real rec_err = layers[ index ]->fpropNLL(previous_input_representation);
        train_costs[index] = rec_err;

        layers[ index ]->bpropNLL(previous_input_representation, rec_err,
                                  reconstruction_activation_gradients);
    }

    // RBM learning
    if( !fast_exact_is_equal( cd_learning_rate, 0 ) )
    {
        layers[index+1]->setExpectation( expectations[index+1] );
        layers[index+1]->generateSample();

        // accumulate positive stats using the expectation
        // we deep-copy because the value will change during negative phase
        pos_down_val = expectations[index];
        pos_up_val << layers[index+1]->expectation;

        // down propagation, starting from a sample of layers[index+1]
        connections[index]->setAsUpInput( layers[index+1]->sample );

        layers[index]->getAllActivations( connections[index] );
        layers[index]->computeExpectation();
        layers[index]->generateSample();

        // negative phase
        connections[index]->setAsDownInput( layers[index]->sample );
        layers[index+1]->getAllActivations( connections[index] );
        layers[index+1]->computeExpectation();
        // accumulate negative stats
        // no need to deep-copy because the values won't change before update
        neg_down_val = layers[index]->sample;
        neg_up_val = layers[index+1]->expectation;
    }
    // Update hidden layer bias and weights
    if( !fast_exact_is_equal( greedy_learning_rate, 0 ) )
    {
        layers[ index ]->update(reconstruction_activation_gradients);

        reconstruction_connections[ index ]->bpropUpdate(
            expectations[index+1],
            reconstruction_activations,
            reconstruction_expectation_gradients,
            reconstruction_activation_gradients);

        layers[ index+1 ]->bpropUpdate(
            activations[index+1],
            expectations[index+1],
            reconstruction_activation_gradients, // reused
            reconstruction_expectation_gradients);

        connections[ index ]->bpropUpdate(
            previous_input_representation,
            activations[index+1],
            reconstruction_expectation_gradients, // reused
            reconstruction_activation_gradients);
    }

    // RBM updates
    if( !fast_exact_is_equal( cd_learning_rate, 0 ) )
    {
        if( !fast_exact_is_equal( cd_decrease_ct , 0 ) )
            lr = cd_learning_rate/(1 + cd_decrease_ct
                                   * this_stage);
        else
            lr = cd_learning_rate;

        layers[index]->setLearningRate( lr );
        connections[index]->setLearningRate( lr );
        layers[index+1]->setLearningRate( lr );

        layers[index]->update( pos_down_val, neg_down_val );
        connections[index]->update( pos_down_val, pos_up_val,
                                    neg_down_val, neg_up_val );
        layers[index+1]->update( pos_up_val, neg_up_val );
    }
}
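// Non-local Manifold Parzen parameterization: for an input x, the network
// predicts a basis F (n_components x inputsize()), an offset mu and a noise
// parameter, which define a Gaussian centered at x + mu with covariance
//
//     Sigma(x) = F(x)' F(x) + sigma_noise(x) I .
//
// The SVD of F gives directions u_k (rows of U) and sm_svd[k] = S[k]^2, the
// eigenvalues of F'F, so that (with n = inputsize())
//
//     Sigma^{-1} = I/sigma_noise
//                  + sum_k ( 1/(sm_svd[k]+sigma_noise) - 1/sigma_noise ) u_k u_k'
//     log det Sigma = sum_k log(sm_svd[k]+sigma_noise)
//                     + (n - n_components) log sigma_noise
//
// which is what the mahal/norm_term computations below rely on.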
void DeepNonLocalManifoldParzen::computeManifoldParzenParameters(
    const Vec& input, Mat& F, Vec& mu,
    Vec& pre_sigma_noise, Mat& U, Vec& sm_svd, int target_class) const
{
    if( train_one_network_per_class )
    {
        PLASSERT( target_class >= 0 );
        layers = all_layers[target_class];
        connections = all_connections[target_class];
        reconstruction_connections = all_reconstruction_connections[target_class];
        output_connections = all_output_connections[target_class];
    }

    // Get example representation
    computeRepresentation(input, input_representation,
                          n_layers-1);

    // Get parameters
    output_connections->fprop( input_representation, all_outputs );

    F.resize(n_components, inputsize());
    mu.resize(inputsize());
    pre_sigma_noise.resize(1);

    F << all_outputs.subVec(0,n_components * inputsize()).toMat(
        n_components, inputsize());
    mu << all_outputs.subVec(n_components * inputsize(),inputsize());
    if( do_not_learn_sigma_noise )
        pre_sigma_noise.clear();
    else
        pre_sigma_noise << all_outputs.subVec( (n_components+1) * inputsize(), 1 );

    F_copy.resize(F.length(),F.width());
    sm_svd.resize(n_components);
    // N.B. this is the SVD of F'
    F_copy << F;
    lapackSVD(F_copy, Ut, S, V,'A',1.5);
    U.resize(n_components,inputsize());
    for (int k=0;k<n_components;k++)
    {
        sm_svd[k] = mypow(S[k],2);
        U(k) << Ut(k);
    }
}
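// Fine-tuning minimizes the average negative log-likelihood of the
// k_neighbors nearest neighbors y_j under the Gaussian predicted at x.
// With z_j = y_j - x - mu, the gradients accumulated below (with
// coef = 1/k_neighbors) are
//
//     dNLL/dF   = coef * sum_j ( F Sigma^{-1}
//                                - (F Sigma^{-1} z_j) (Sigma^{-1} z_j)' )
//     dNLL/dmu  = -coef * sum_j Sigma^{-1} z_j
//     dNLL/dsn  = coef * sum_j 0.5 * ( tr(Sigma^{-1})
//                                      - ||Sigma^{-1} z_j||^2 ) * 2 * pre_sigma_noise
//
// (the last by the chain rule through
// sigma_noise = pre_sigma_noise^2 + min_sigma_noise), and are then
// backpropagated through the output module and all hidden layers.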
void DeepNonLocalManifoldParzen::fineTuningStep(
    const Vec& input, const Vec& target,
    Vec& train_costs, Mat nearest_neighbors )
{
    manifold_parzen_parameters_are_up_to_date = false;

    if( n_classes > 1 )
        computeManifoldParzenParameters( input, F, mu, pre_sigma_noise, U, sm_svd,
                                         (int)target[0]);
    else
        computeManifoldParzenParameters( input, F, mu, pre_sigma_noise, U, sm_svd);

    real sigma_noise = pre_sigma_noise[0]*pre_sigma_noise[0] + min_sigma_noise;

    real mahal = 0;
    real norm_term = 0;
    real dotp = 0;
    real coef = 0;
    real n = inputsize();
    z.resize(k_neighbors,inputsize());
    temp_ncomp.resize(n_components);
    inv_Sigma_z.resize(k_neighbors,inputsize());
    inv_Sigma_z.clear();
    real tr_inv_Sigma = 0;
    train_costs.last() = 0;
    for(int j=0; j<k_neighbors;j++)
    {
        zj = z(j);
        substract(nearest_neighbors(j),input,diff_neighbor_input);
        substract(diff_neighbor_input,mu,zj);

        mahal = -0.5*pownorm(zj)/sigma_noise;
        norm_term = - n/2.0 * Log2Pi - 0.5*(n-n_components)*pl_log(sigma_noise);

        inv_sigma_zj = inv_Sigma_z(j);
        inv_sigma_zj << zj;
        inv_sigma_zj /= sigma_noise;

        if(j==0)
            tr_inv_Sigma = n/sigma_noise;

        for(int k=0; k<n_components; k++)
        {
            uk = U(k);
            dotp = dot(zj,uk);
            coef = (1.0/(sm_svd[k]+sigma_noise) - 1.0/sigma_noise);
            multiplyAcc(inv_sigma_zj,uk,dotp*coef);
            mahal -= dotp*dotp*0.5*coef;
            norm_term -= 0.5*pl_log(sm_svd[k]+sigma_noise);
            if(j==0)
                tr_inv_Sigma += coef;
        }

        train_costs.last() += -1*(norm_term + mahal);
    }

    train_costs.last() /= k_neighbors;

    inv_Sigma_F.resize( n_components, inputsize() );
    inv_Sigma_F.clear();
    for(int k=0; k<n_components; k++)
    {
        fk = F(k);
        inv_sigma_fk = inv_Sigma_F(k);
        inv_sigma_fk << fk;
        inv_sigma_fk /= sigma_noise;
        for(int k2=0; k2<n_components;k2++)
        {
            uk2 = U(k2);
            multiplyAcc(inv_sigma_fk,uk2,
                        (1.0/(sm_svd[k2]+sigma_noise) - 1.0/sigma_noise)*
                        dot(fk,uk2));
        }
    }

    all_outputs_gradient.resize((n_components+1) * inputsize()+
                                (do_not_learn_sigma_noise ? 0 : 1));
    all_outputs_gradient.clear();
    //coef = 1.0/train_set->length();
    coef = 1.0/k_neighbors;
    for(int neighbor=0; neighbor<k_neighbors; neighbor++)
    {
        // dNLL/dF
        product(temp_ncomp,F,inv_Sigma_z(neighbor));
        bprop_to_bases(all_outputs_gradient.subVec(0,n_components * inputsize())
                           .toMat(n_components,inputsize()),
                       inv_Sigma_F,
                       temp_ncomp,inv_Sigma_z(neighbor),
                       coef);

        // dNLL/dmu
        multiplyAcc(all_outputs_gradient.subVec(n_components * inputsize(),
                                                inputsize()),
                    inv_Sigma_z(neighbor),
                    -coef) ;

        if( !do_not_learn_sigma_noise )
        {
            // dNLL/dsn
            all_outputs_gradient[(n_components + 1 )* inputsize()] += coef*
                0.5*(tr_inv_Sigma - pownorm(inv_Sigma_z(neighbor))) *
                2 * pre_sigma_noise[0];
        }
    }

    // Propagating supervised gradient
    output_connections->bpropUpdate( input_representation, all_outputs,
                                     expectation_gradients[n_layers-1],
                                     all_outputs_gradient);

    for( int i=n_layers-1 ; i>0 ; i-- )
    {
        layers[i]->bpropUpdate( activations[i],
                                expectations[i],
                                activation_gradients[i],
                                expectation_gradients[i] );

        connections[i-1]->bpropUpdate( expectations[i-1],
                                       activations[i],
                                       expectation_gradients[i-1],
                                       activation_gradients[i] );
    }
}

// grad_F += alpha ( M - v1 v2')
void DeepNonLocalManifoldParzen::bprop_to_bases(const Mat& R, const Mat& M,
                                                const Vec& v1,
                                                const Vec& v2, real alpha)
{
#ifdef BOUNDCHECK
    if (M.length() != R.length() || M.width() != R.width()
        || v1.length()!=M.length() || M.width()!=v2.length() )
        PLERROR("DeepNonLocalManifoldParzen::bprop_to_bases(): incompatible "
                "arguments' sizes");
#endif

    const real* v_1=v1.data();
    const real* v_2=v2.data();
    for (int i=0;i<M.length();i++)
    {
        real* mi = M[i];
        real* ri = R[i];
        real v1i = v_1[i];
        for (int j=0;j<M.width();j++)
            ri[j] += alpha*(mi[j] - v1i * v_2[j]);
    }
}


void DeepNonLocalManifoldParzen::computeRepresentation(const Vec& input,
                                                       Vec& representation,
                                                       int layer) const
{
    if(layer == 0)
    {
        representation.resize(input.length());
        expectations[0] << input;
        representation << input;
        return;
    }

    expectations[0] << input;
    for( int i=0 ; i<layer; i++ )
    {
        connections[i]->fprop( expectations[i], activations[i+1] );
        layers[i+1]->fprop(activations[i+1],expectations[i+1]);
    }
    representation.resize(expectations[layer].length());
    representation << expectations[layer];
}
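// computeOutput has three modes: (1) while a hidden layer is still being
// greedily trained, it only computes the input's representation; (2) with
// use_test_centric_nlmp, the test point's density is evaluated under its own
// predicted Gaussian, once per class-specific network; (3) otherwise the
// class log-likelihood is a Parzen mixture over all (same-class) training
// points, using either cached parameters (save_manifold_parzen_parameters)
// or parameters recomputed on the fly. For n_classes > 1 the output is the
// argmax over the class votes; for n_classes == 1 it is the log-density of
// the input.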
void DeepNonLocalManifoldParzen::computeOutput(const Vec& input, Vec& output) const
{
    if( currently_trained_layer<n_layers
        && reconstruction_connections.length() != 0 )
    {
        computeRepresentation(input, input_representation,
                              currently_trained_layer);
        return;
    }

    test_votes.resize(n_classes);
    test_votes.clear();

    // Variables for probability computations
    real log_p_x_g_y = 0;
    real mahal = 0;
    real norm_term = 0;
    real n = inputsize();
    real dotp = 0;
    real coef = 0;
    real sigma_noise = 0;

    Vec input_j(inputsize());
    Vec target(targetsize());
    real weight;

    if( use_test_centric_nlmp )
    {
        for( int i=0; i<n_classes; i++ )
        {
            computeManifoldParzenParameters( input, F, mu,
                                             pre_sigma_noise, U, sm_svd,
                                             i);

            sigma_noise = pre_sigma_noise[0]*pre_sigma_noise[0]
                + min_sigma_noise;

            mahal = -0.5*pownorm(mu)/sigma_noise;
            norm_term = - n/2.0 * Log2Pi - 0.5*(n-n_components)*
                pl_log(sigma_noise);

            for(int k=0; k<n_components; k++)
            {
                uk = U(k);
                dotp = dot(mu,uk);
                coef = (1.0/(sm_svd[k]+sigma_noise) - 1.0/sigma_noise);
                mahal -= dotp*dotp*0.5*coef;
                norm_term -= 0.5*pl_log(sm_svd[k]+sigma_noise);
            }

            log_p_x_g_y = norm_term + mahal;
            test_votes[i] = log_p_x_g_y ;
        }
    }
    else
    {
        if( save_manifold_parzen_parameters )
        {
            updateManifoldParzenParameters();

            int input_j_index;
            for( int i=0; i<n_classes; i++ )
            {
                for( int j=0;
                     j<(n_classes > 1 ?
                        class_datasets[i]->length()
                        : train_set->length());
                     j++ )
                {
                    if( n_classes > 1 )
                    {
                        class_datasets[i]->getExample(j,input_j,target,weight);
                        input_j_index = class_datasets[i]->indices[j];
                    }
                    else
                    {
                        train_set->getExample(j,input_j,target,weight);
                        input_j_index = j;
                    }

                    U << eigenvectors[input_j_index];
                    sm_svd << eigenvalues(input_j_index);
                    sigma_noise = sigma_noises[input_j_index];
                    mu << mus(input_j_index);

                    substract(input,input_j,diff_neighbor_input);
                    substract(diff_neighbor_input,mu,diff);

                    mahal = -0.5*pownorm(diff)/sigma_noise;
                    norm_term = - n/2.0 * Log2Pi - 0.5*(n-n_components)*
                        pl_log(sigma_noise);

                    for(int k=0; k<n_components; k++)
                    {
                        uk = U(k);
                        dotp = dot(diff,uk);
                        coef = (1.0/(sm_svd[k]+sigma_noise) - 1.0/sigma_noise);
                        mahal -= dotp*dotp*0.5*coef;
                        norm_term -= 0.5*pl_log(sm_svd[k]+sigma_noise);
                    }

                    if( j==0 )
                        log_p_x_g_y = norm_term + mahal;
                    else
                        log_p_x_g_y = logadd(norm_term + mahal, log_p_x_g_y);
                }

                test_votes[i] = log_p_x_g_y;
            }
        }
        else
        {
            for( int i=0; i<n_classes; i++ )
            {
                for( int j=0;
                     j<(n_classes > 1 ?
                        class_datasets[i]->length()
                        : train_set->length());
                     j++ )
                {
                    if( n_classes > 1 )
                    {
                        class_datasets[i]->getExample(j,input_j,target,weight);
                        computeManifoldParzenParameters( input_j, F, mu,
                                                         pre_sigma_noise, U, sm_svd,
                                                         (int) target[0]);
                    }
                    else
                    {
                        train_set->getExample(j,input_j,target,weight);
                        computeManifoldParzenParameters( input_j, F, mu,
                                                         pre_sigma_noise, U, sm_svd );
                    }

                    sigma_noise = pre_sigma_noise[0]*pre_sigma_noise[0]
                        + min_sigma_noise;

                    substract(input,input_j,diff_neighbor_input);
                    substract(diff_neighbor_input,mu,diff);

                    mahal = -0.5*pownorm(diff)/sigma_noise;
                    norm_term = - n/2.0 * Log2Pi - 0.5*(n-n_components)*
                        pl_log(sigma_noise);

                    for(int k=0; k<n_components; k++)
                    {
                        uk = U(k);
                        dotp = dot(diff,uk);
                        coef = (1.0/(sm_svd[k]+sigma_noise) - 1.0/sigma_noise);
                        mahal -= dotp*dotp*0.5*coef;
                        norm_term -= 0.5*pl_log(sm_svd[k]+sigma_noise);
                    }

                    if( j==0 )
                        log_p_x_g_y = norm_term + mahal;
                    else
                        log_p_x_g_y = logadd(norm_term + mahal, log_p_x_g_y);
                }

                test_votes[i] = log_p_x_g_y;
            }
        }
    }
    if( n_classes > 1 )
        output[0] = argmax(test_votes);
    else
        output[0] = test_votes[0]-pl_log(train_set->length());
}

void DeepNonLocalManifoldParzen::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                         const Vec& target, Vec& costs) const
{
    // Assumes that computeOutput has been called

    costs.resize( getTestCostNames().length() );
    costs.fill( MISSING_VALUE );

    if( train_one_network_per_class )
    {
        int c = (int) target[0];
        layers = all_layers[c];
        connections = all_connections[c];
        reconstruction_connections = all_reconstruction_connections[c];
        output_connections = all_output_connections[c];
    }

    if( currently_trained_layer<n_layers
        && reconstruction_connections.length() != 0 )
    {
        reconstruction_connections[ currently_trained_layer-1 ]->fprop(
            expectations[currently_trained_layer],
            reconstruction_activations);
        layers[ currently_trained_layer-1 ]->fprop(
            reconstruction_activations,
            layers[ currently_trained_layer-1 ]->expectation);

        layers[ currently_trained_layer-1 ]->activation <<
            reconstruction_activations;
        layers[ currently_trained_layer-1 ]->setExpectationByRef(
            layers[ currently_trained_layer-1 ]->expectation);
        costs[ currently_trained_layer-1 ] =
            layers[ currently_trained_layer-1 ]->fpropNLL(
                expectations[currently_trained_layer-1]);
    }
    else
    {
        if( n_classes > 1 )
        {
            int target_class = ((int)round(target[0]));
            if( ((int)round(output[0])) == target_class )
                costs[n_layers-1] = 0;
            else
                costs[n_layers-1] = 1;
            if( !use_test_centric_nlmp )
                costs[n_layers] = - test_votes[target_class]
                    + pl_log(class_datasets[target_class]->length()); // Must take into account the 1/n normalization
        }
        else
        {
            costs[n_layers] = - output[0]; // 1/n normalization already accounted for
        }
    }
}

////////////////////////////////////
// updateManifoldParzenParameters //
////////////////////////////////////
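// Caches the Gaussian parameters (principal directions, eigenvalues, noise
// variance and offset) of every training example, so that repeated
// test-time density evaluations need not recompute the network's outputs.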
void DeepNonLocalManifoldParzen::updateManifoldParzenParameters() const
{
    if(!manifold_parzen_parameters_are_up_to_date)
    {
        // Precompute manifold parzen parameters
        Vec input( inputsize() );
        Vec target( targetsize() );
        real weight;
        real sigma_noise;

        eigenvectors.resize(train_set->length());
        eigenvalues.resize(train_set->length(),n_components);
        sigma_noises.resize(train_set->length());
        mus.resize(train_set->length(), inputsize());

        for( int i=0; i<train_set->length(); i++ )
        {
            train_set->getExample(i,input,target,weight);

            if( n_classes > 1 )
                computeManifoldParzenParameters( input, F, mu,
                                                 pre_sigma_noise, U, sm_svd,
                                                 (int) target[0]);
            else
                computeManifoldParzenParameters( input, F, mu,
                                                 pre_sigma_noise, U, sm_svd);

            sigma_noise = pre_sigma_noise[0]*pre_sigma_noise[0] + min_sigma_noise;

            eigenvectors[i].resize(n_components,inputsize());
            eigenvectors[i] << U;
            eigenvalues(i) << sm_svd;
            sigma_noises[i] = sigma_noise;
            mus(i) << mu;
        }

        manifold_parzen_parameters_are_up_to_date = true;
    }
}

TVec<string> DeepNonLocalManifoldParzen::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by
    // getTrainCostNames).

    TVec<string> cost_names(0);

    for( int i=0; i<layers.size()-1; i++)
        cost_names.push_back("reconstruction_error_" + tostring(i+1));

    cost_names.append( "class_error" );
    cost_names.append( "NLL" );

    return cost_names;
}

TVec<string> DeepNonLocalManifoldParzen::getTrainCostNames() const
{
    TVec<string> cost_names = getTestCostNames();
    cost_names.append( "NLL_neighbors" );
    return cost_names ;
}

void DeepNonLocalManifoldParzen::setTrainingSet(VMat training_set, bool call_forget)
{
    inherited::setTrainingSet(training_set,call_forget);

    manifold_parzen_parameters_are_up_to_date = false;

    // Separate classes
    if( n_classes > 1 )
    {
        class_datasets.resize(n_classes);
        for(int k=0; k<n_classes; k++)
        {
            class_datasets[k] = new ClassSubsetVMatrix();
            class_datasets[k]->classes.resize(1);
            class_datasets[k]->classes[0] = k;
            class_datasets[k]->source = training_set;
            class_datasets[k]->build();
        }
    }

    //class_proportions.resize(n_classes);
    //class_proportions.fill(0);
    //real sum = 0;
    //for(int k=0; k<n_classes; k++)
    //{
    //    class_proportions[k] = class_datasets[k]->length();
    //    sum += class_datasets[k]->length();
    //}
    //class_proportions /= sum;
}


//##### Helper functions ##################################################

void DeepNonLocalManifoldParzen::setLearningRate( real the_learning_rate )
{
    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        layers[i]->setLearningRate( the_learning_rate );
        connections[i]->setLearningRate( the_learning_rate );
    }
    layers[n_layers-1]->setLearningRate( the_learning_rate );
    output_connections->setLearningRate( the_learning_rate );
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :