PLearn 0.1
|
00001 // -*- C++ -*- 00002 00003 // PseudolikelihoodRBM.cc 00004 // 00005 // Copyright (C) 2008 Hugo Larochelle 00006 // 00007 // Redistribution and use in source and binary forms, with or without 00008 // modification, are permitted provided that the following conditions are met: 00009 // 00010 // 1. Redistributions of source code must retain the above copyright 00011 // notice, this list of conditions and the following disclaimer. 00012 // 00013 // 2. Redistributions in binary form must reproduce the above copyright 00014 // notice, this list of conditions and the following disclaimer in the 00015 // documentation and/or other materials provided with the distribution. 00016 // 00017 // 3. The name of the authors may not be used to endorse or promote 00018 // products derived from this software without specific prior written 00019 // permission. 00020 // 00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 00031 // 00032 // This file is part of the PLearn library. For more information on the PLearn 00033 // library, go to the PLearn Web site at www.plearn.org 00034 00035 // Authors: Hugo Larochelle 00036 00040 #define PL_LOG_MODULE_NAME "PseudolikelihoodRBM" 00041 #include "PseudolikelihoodRBM.h" 00042 #include <plearn_learners/online/RBMLayer.h> 00043 #include <plearn/io/pl_log.h> 00044 #include <plearn/math/TMat_sort.h> 00045 00046 #define minibatch_hack 0 // Do we force the minibatch setting? (debug hack) 00047 00048 namespace PLearn { 00049 using namespace std; 00050 00051 PLEARN_IMPLEMENT_OBJECT( 00052 PseudolikelihoodRBM, 00053 "Restricted Boltzmann Machine trained by (generalized) pseudolikelihood.", 00054 ""); 00055 00057 // PseudolikelihoodRBM // 00059 PseudolikelihoodRBM::PseudolikelihoodRBM() : 00060 learning_rate( 0. ), 00061 decrease_ct( 0. ), 00062 cd_learning_rate( 0. ), 00063 cd_decrease_ct( 0. ), 00064 cd_n_gibbs( 1 ), 00065 persistent_cd_weight( 0. ), 00066 n_gibbs_chains( 1 ), 00067 use_mean_field_cd( false ), 00068 denoising_learning_rate( 0. ), 00069 denoising_decrease_ct( 0. ), 00070 fraction_of_masked_inputs( 0. ), 00071 only_reconstruct_masked_inputs( false ), 00072 n_classes( -1 ), 00073 input_is_sparse( false ), 00074 factorized_connection_rank( -1 ), 00075 n_selected_inputs_pseudolikelihood( -1 ), 00076 n_selected_inputs_cd( -1 ), 00077 //select_among_k_most_frequent( -1 ), 00078 compute_input_space_nll( false ), 00079 compute_Z_exactly( true ), 00080 use_ais_to_compute_Z( false ), 00081 n_ais_chains( 100 ), 00082 pseudolikelihood_context_size ( 0 ), 00083 pseudolikelihood_context_type( "uniform_random" ), 00084 k_most_correlated( -1 ), 00085 generative_learning_weight( 0 ), 00086 sparsity_bias_decay( 0 ), 00087 semi_sup_learning_weight( 0. ), 00088 nll_cost_index( -1 ), 00089 log_Z_cost_index( -1 ), 00090 log_Z_ais_cost_index( -1 ), 00091 log_Z_interval_lower_cost_index( -1 ), 00092 log_Z_interval_upper_cost_index( -1 ), 00093 class_cost_index( -1 ), 00094 training_cpu_time_cost_index ( -1 ), 00095 cumulative_training_time_cost_index ( -1 ), 00096 //cumulative_testing_time_cost_index ( -1 ), 00097 cumulative_training_time( 0 ), 00098 //cumulative_testing_time( 0 ), 00099 log_Z( MISSING_VALUE ), 00100 log_Z_ais( MISSING_VALUE ), 00101 log_Z_down( MISSING_VALUE ), 00102 log_Z_up( MISSING_VALUE ), 00103 Z_is_up_to_date( false ), 00104 Z_ais_is_up_to_date( false ) 00105 { 00106 random_gen = new PRandom(); 00107 } 00108 00110 // declareOptions // 00112 void PseudolikelihoodRBM::declareOptions(OptionList& ol) 00113 { 00114 declareOption(ol, "learning_rate", &PseudolikelihoodRBM::learning_rate, 00115 OptionBase::buildoption, 00116 "The learning rate used for pseudolikelihood training.\n" 00117 "Pseudolikelihood training assumes input_layer is a\n" 00118 "RBMBinomialLayer. It will work even if it isn't,\n" 00119 "but training won't be appropriate.\n"); 00120 00121 declareOption(ol, "decrease_ct", &PseudolikelihoodRBM::decrease_ct, 00122 OptionBase::buildoption, 00123 "The decrease constant of the learning rate.\n"); 00124 00125 declareOption(ol, "cd_learning_rate", &PseudolikelihoodRBM::cd_learning_rate, 00126 OptionBase::buildoption, 00127 "The learning rate used for contrastive divergence learning.\n"); 00128 00129 declareOption(ol, "cd_decrease_ct", &PseudolikelihoodRBM::cd_decrease_ct, 00130 OptionBase::buildoption, 00131 "The decrease constant of the contrastive divergence " 00132 "learning rate.\n"); 00133 00134 declareOption(ol, "cd_n_gibbs", &PseudolikelihoodRBM::cd_n_gibbs, 00135 OptionBase::buildoption, 00136 "Number of negative phase gibbs sampling steps.\n"); 00137 00138 declareOption(ol, "persistent_cd_weight", 00139 &PseudolikelihoodRBM::persistent_cd_weight, 00140 OptionBase::buildoption, 00141 "Weight of Persistent Contrastive Divergence, i.e. " 00142 "weight of the prolonged gibbs chain.\n"); 00143 00144 declareOption(ol, "n_gibbs_chains", 00145 &PseudolikelihoodRBM::n_gibbs_chains, 00146 OptionBase::buildoption, 00147 "Number of gibbs chains maintained in parallel for " 00148 "Persistent Contrastive Divergence.\n"); 00149 00150 declareOption(ol, "use_mean_field_cd", &PseudolikelihoodRBM::use_mean_field_cd, 00151 OptionBase::buildoption, 00152 "Indication that a mean-field version of Contrastive " 00153 "Divergence (MF-CD) should be used.\n"); 00154 00155 declareOption(ol, "denoising_learning_rate", 00156 &PseudolikelihoodRBM::denoising_learning_rate, 00157 OptionBase::buildoption, 00158 "The learning rate used for denoising autoencoder learning.\n"); 00159 00160 declareOption(ol, "denoising_decrease_ct", 00161 &PseudolikelihoodRBM::denoising_decrease_ct, 00162 OptionBase::buildoption, 00163 "The decrease constant of the denoising autoencoder " 00164 "learning rate.\n"); 00165 00166 declareOption(ol, "fraction_of_masked_inputs", 00167 &PseudolikelihoodRBM::fraction_of_masked_inputs, 00168 OptionBase::buildoption, 00169 "Fraction of input components set to 0 for denoising " 00170 "autoencoder learning.\n"); 00171 00172 declareOption(ol, "only_reconstruct_masked_inputs", 00173 &PseudolikelihoodRBM::only_reconstruct_masked_inputs, 00174 OptionBase::buildoption, 00175 "Indication that only the masked inputs should be reconstructed.\n"); 00176 00177 declareOption(ol, "n_classes", &PseudolikelihoodRBM::n_classes, 00178 OptionBase::buildoption, 00179 "Number of classes in the training set (for supervised learning).\n" 00180 "If < 2, unsupervised learning will be performed.\n" 00181 ); 00182 00183 declareOption(ol, "input_is_sparse", &PseudolikelihoodRBM::input_is_sparse, 00184 OptionBase::buildoption, 00185 "Indication that the input is in a sparse format. Input is also assumed\n" 00186 "to be binary.\n" 00187 ); 00188 00189 declareOption(ol, "factorized_connection_rank", &PseudolikelihoodRBM::factorized_connection_rank, 00190 OptionBase::buildoption, 00191 "Rank of factorized connection for sparse inputs.\n" 00192 ); 00193 00194 declareOption(ol, "n_selected_inputs_pseudolikelihood", 00195 &PseudolikelihoodRBM::n_selected_inputs_pseudolikelihood, 00196 OptionBase::buildoption, 00197 "Number of randomly selected inputs for pseudolikelihood cost." 00198 "This option is ignored for pseudolikelihood_context_size > 0.\n" 00199 ); 00200 00201 declareOption(ol, "n_selected_inputs_cd", 00202 &PseudolikelihoodRBM::n_selected_inputs_cd, 00203 OptionBase::buildoption, 00204 "Number of randomly selected inputs for CD in sparse " 00205 "input case.\n" 00206 "Note that CD for sparse inputs assumes RBMBinomialLayer in " 00207 "input.\n" 00208 ); 00209 00210 //declareOption(ol, "select_among_k_most_frequent", 00211 // &PseudolikelihoodRBM::select_among_k_most_frequent, 00212 // OptionBase::buildoption, 00213 // "Indication that inputs for pseudolikelihood cost are selected among the\n" 00214 // "k most frequently active inputs.\n" 00215 // ); 00216 00217 declareOption(ol, "compute_input_space_nll", 00218 &PseudolikelihoodRBM::compute_input_space_nll, 00219 OptionBase::buildoption, 00220 "Indication that the input space NLL should be " 00221 "computed during test. It will require a procedure to compute\n" 00222 "the partition function Z, which can be exact (see compute_Z_exactly)\n" 00223 "or approximate (see use_ais_to_compute_Z). If both are true,\n" 00224 "exact computation will be used.\n" 00225 ); 00226 00227 declareOption(ol, "compute_Z_exactly", 00228 &PseudolikelihoodRBM::compute_Z_exactly, 00229 OptionBase::buildoption, 00230 "Indication that the partition function Z should be computed exactly.\n" 00231 ); 00232 00233 declareOption(ol, "use_ais_to_compute_Z", 00234 &PseudolikelihoodRBM::use_ais_to_compute_Z, 00235 OptionBase::buildoption, 00236 "Whether to use AIS (see Salakhutdinov and Murray ICML2008) to\n" 00237 "compute Z. Assumes the input layer is an RBMBinomialLayer.\n" 00238 ); 00239 00240 declareOption(ol, "n_ais_chains", 00241 &PseudolikelihoodRBM::n_ais_chains, 00242 OptionBase::buildoption, 00243 "Number of AIS chains.\n" 00244 ); 00245 00246 declareOption(ol, "ais_beta_begin", 00247 &PseudolikelihoodRBM::ais_beta_begin, 00248 OptionBase::buildoption, 00249 "List of interval beginnings, used to specify the beta schedule.\n" 00250 "Its first element is always set to 0.\n" 00251 ); 00252 00253 declareOption(ol, "ais_beta_end", 00254 &PseudolikelihoodRBM::ais_beta_end, 00255 OptionBase::buildoption, 00256 "List of interval ends, used to specify the beta schedule.\n" 00257 "Its last element is always set to 1.\n" 00258 ); 00259 00260 declareOption(ol, "ais_beta_n_steps", 00261 &PseudolikelihoodRBM::ais_beta_n_steps, 00262 OptionBase::buildoption, 00263 "Number of steps in each of the beta interval, used to " 00264 "specify the beta schedule.\n" 00265 ); 00266 00267 declareOption(ol, "pseudolikelihood_context_size", 00268 &PseudolikelihoodRBM::pseudolikelihood_context_size, 00269 OptionBase::buildoption, 00270 "Number of additional input variables chosen to form the joint\n" 00271 "condition likelihoods in generalized pseudolikelihood\n" 00272 "(default = 0, which corresponds to standard pseudolikelihood).\n" 00273 ); 00274 00275 declareOption(ol, "pseudolikelihood_context_type", 00276 &PseudolikelihoodRBM::pseudolikelihood_context_type, 00277 OptionBase::buildoption, 00278 "Type of context for generalized pseudolikelihood:\n" 00279 "\"uniform_random\": context elements are picked uniformly randomly\n" 00280 "\n" 00281 "- \"most_correlated\": the most correlated (positively or negatively\n" 00282 " elemenst with the current input element are picked\n" 00283 "\n" 00284 "- \"most_correlated_uniform_random\": context elements are picked uniformly\n" 00285 " among the k_most_correlated other input\n" 00286 " elements, for each current input\n" 00287 ); 00288 00289 declareOption(ol, "k_most_correlated", 00290 &PseudolikelihoodRBM::k_most_correlated, 00291 OptionBase::buildoption, 00292 "Number of most correlated input elements over which to sample.\n" 00293 ); 00294 00295 declareOption(ol, "generative_learning_weight", 00296 &PseudolikelihoodRBM::generative_learning_weight, 00297 OptionBase::buildoption, 00298 "Weight of generative learning.\n" 00299 ); 00300 00301 declareOption(ol, "sparsity_bias_decay", 00302 &PseudolikelihoodRBM::sparsity_bias_decay, 00303 OptionBase::buildoption, 00304 "Constant to subtract (times the learning rate) to the hidden " 00305 "layer bias at each iteration.\n" 00306 ); 00307 00308 declareOption(ol, "semi_sup_learning_weight", 00309 &PseudolikelihoodRBM::semi_sup_learning_weight, 00310 OptionBase::buildoption, 00311 "Weight on unlabeled examples update during unsupervised learning.\n" 00312 "In other words, it's the same thing at generaitve_learning_weight,\n" 00313 "but for the unlabeled examples.\n"); 00314 00315 declareOption(ol, "input_layer", &PseudolikelihoodRBM::input_layer, 00316 OptionBase::buildoption, 00317 "The binomial input layer of the RBM.\n"); 00318 00319 declareOption(ol, "hidden_layer", &PseudolikelihoodRBM::hidden_layer, 00320 OptionBase::buildoption, 00321 "The hidden layer of the RBM.\n"); 00322 00323 declareOption(ol, "connection", &PseudolikelihoodRBM::connection, 00324 OptionBase::buildoption, 00325 "The connection weights between the input and hidden layer.\n"); 00326 00327 declareOption(ol, "cumulative_training_time", 00328 &PseudolikelihoodRBM::cumulative_training_time, 00329 //OptionBase::learntoption | OptionBase::nosave, 00330 OptionBase::learntoption, 00331 "Cumulative training time since age=0, in seconds.\n"); 00332 00333 // declareOption(ol, "cumulative_testing_time", 00334 // &PseudolikelihoodRBM::cumulative_testing_time, 00335 // //OptionBase::learntoption | OptionBase::nosave, 00336 // OptionBase::learntoption, 00337 // "Cumulative testing time since age=0, in seconds.\n"); 00338 00339 00340 declareOption(ol, "target_layer", &PseudolikelihoodRBM::target_layer, 00341 OptionBase::learntoption, 00342 "The target layer of the RBM.\n"); 00343 00344 declareOption(ol, "target_connection", &PseudolikelihoodRBM::target_connection, 00345 OptionBase::learntoption, 00346 "The connection weights between the target and hidden layer.\n"); 00347 00348 declareOption(ol, "U", &PseudolikelihoodRBM::U, 00349 OptionBase::learntoption, 00350 "First connection factorization matrix.\n"); 00351 00352 declareOption(ol, "V", &PseudolikelihoodRBM::V, 00353 OptionBase::learntoption, 00354 "If factorized_connection_rank > 0, second connection " 00355 "factorization matrix. Otherwise, input connections.\n"); 00356 00357 declareOption(ol, "log_Z", &PseudolikelihoodRBM::log_Z, 00358 OptionBase::learntoption, 00359 "Normalisation constant, computed exactly (on log scale).\n"); 00360 00361 declareOption(ol, "log_Z_ais", &PseudolikelihoodRBM::log_Z_ais, 00362 OptionBase::learntoption, 00363 "Normalisation constant, computed by AIS (on log scale).\n"); 00364 00365 declareOption(ol, "log_Z_down", &PseudolikelihoodRBM::log_Z_down, 00366 OptionBase::learntoption, 00367 "Lower bound of confidence interval for log_Z.\n"); 00368 00369 declareOption(ol, "log_Z_up", &PseudolikelihoodRBM::log_Z_up, 00370 OptionBase::learntoption, 00371 "Upper bound of confidence interval for log_Z.\n"); 00372 00373 declareOption(ol, "Z_is_up_to_date", &PseudolikelihoodRBM::Z_is_up_to_date, 00374 OptionBase::learntoption, 00375 "Indication that the normalisation constant Z (computed exactly) " 00376 "is up to date.\n"); 00377 00378 declareOption(ol, "Z_ais_is_up_to_date", &PseudolikelihoodRBM::Z_ais_is_up_to_date, 00379 OptionBase::learntoption, 00380 "Indication that the normalisation constant Z (computed with AIS) " 00381 "is up to date.\n"); 00382 00383 declareOption(ol, "persistent_gibbs_chain_is_started", 00384 &PseudolikelihoodRBM::persistent_gibbs_chain_is_started, 00385 OptionBase::learntoption, 00386 "Indication that the prolonged gibbs chain for " 00387 "Persistent Consistent Divergence is started, for each chain.\n"); 00388 00389 // declareOption(ol, "target_weights_L1_penalty_factor", 00390 // &PseudolikelihoodRBM::target_weights_L1_penalty_factor, 00391 // OptionBase::buildoption, 00392 // "Target weights' L1_penalty_factor.\n"); 00393 // 00394 // declareOption(ol, "target_weights_L2_penalty_factor", 00395 // &PseudolikelihoodRBM::target_weights_L2_penalty_factor, 00396 // OptionBase::buildoption, 00397 // "Target weights' L2_penalty_factor.\n"); 00398 00399 // Now call the parent class' declareOptions 00400 inherited::declareOptions(ol); 00401 } 00402 00404 // build_ // 00406 void PseudolikelihoodRBM::build_() 00407 { 00408 MODULE_LOG << "build_() called" << endl; 00409 00410 if( inputsize_ > 0 && targetsize_ >= 0) 00411 { 00412 if( compute_input_space_nll && targetsize() > 0 ) 00413 PLERROR("In PseudolikelihoodRBM::build_(): compute_input_space_nll " 00414 "is not compatible with targetsize() > 0"); 00415 00416 if( compute_input_space_nll && input_is_sparse ) 00417 PLERROR("In PseudolikelihoodRBM::build_(): compute_input_space_nll " 00418 "is not compatible with sparse inputs"); 00419 00420 if( pseudolikelihood_context_size < 0 ) 00421 PLERROR("In PseudolikelihoodRBM::build_(): " 00422 "pseudolikelihood_context_size should be >= 0."); 00423 00424 if( pseudolikelihood_context_type != "uniform_random" && 00425 pseudolikelihood_context_type != "most_correlated" && 00426 pseudolikelihood_context_type != "most_correlated_uniform_random" ) 00427 PLERROR("In PseudolikelihoodRBM::build_(): " 00428 "pseudolikelihood_context_type is not valid."); 00429 00430 if( pseudolikelihood_context_type == "most_correlated" 00431 && pseudolikelihood_context_size <= 0 ) 00432 PLERROR("In PseudolikelihoodRBM::build_(): " 00433 "pseudolikelihood_context_size should be > 0 " 00434 "for \"most_correlated\" context type"); 00435 00436 if( compute_input_space_nll && use_ais_to_compute_Z ) 00437 { 00438 if( n_ais_chains <= 0 ) 00439 PLERROR("In PseudolikelihoodRBM::build_(): " 00440 "n_ais_chains should be > 0."); 00441 if( ais_beta_n_steps.length() == 0 ) 00442 PLERROR("In PseudolikelihoodRBM::build_(): " 00443 "AIS schedule should have at least 1 interval of betas."); 00444 if( ais_beta_n_steps.length() != ais_beta_begin.length() || 00445 ais_beta_n_steps.length() != ais_beta_end.length() ) 00446 PLERROR("In PseudolikelihoodRBM::build_(): " 00447 "ais_beta_begin, ais_beta_end and ais_beta_n_steps should " 00448 "all be of the same length."); 00449 } 00450 00451 build_layers_and_connections(); 00452 build_costs(); 00453 00454 // Activate the profiler 00455 Profiler::activate(); 00456 } 00457 } 00458 00460 // build_costs // 00462 void PseudolikelihoodRBM::build_costs() 00463 { 00464 cost_names.resize(0); 00465 00466 int current_index = 0; 00467 if( compute_input_space_nll || targetsize() > 0 ) 00468 { 00469 cost_names.append("NLL"); 00470 nll_cost_index = current_index; 00471 current_index++; 00472 if( compute_Z_exactly ) 00473 { 00474 cost_names.append("log_Z"); 00475 log_Z_cost_index = current_index++; 00476 } 00477 00478 if( use_ais_to_compute_Z ) 00479 { 00480 cost_names.append("log_Z_ais"); 00481 log_Z_ais_cost_index = current_index++; 00482 cost_names.append("log_Z_interval_lower"); 00483 log_Z_interval_lower_cost_index = current_index++; 00484 cost_names.append("log_Z_interval_upper"); 00485 log_Z_interval_upper_cost_index = current_index++; 00486 } 00487 } 00488 00489 if( targetsize() > 0 ) 00490 { 00491 cost_names.append("class_error"); 00492 class_cost_index = current_index; 00493 current_index++; 00494 } 00495 00496 cost_names.append("cpu_time"); 00497 cost_names.append("cumulative_train_time"); 00498 //cost_names.append("cumulative_test_time"); 00499 00500 training_cpu_time_cost_index = current_index; 00501 current_index++; 00502 cumulative_training_time_cost_index = current_index; 00503 current_index++; 00504 //cumulative_testing_time_cost_index = current_index; 00505 //current_index++; 00506 00507 00508 PLASSERT( current_index == cost_names.length() ); 00509 } 00510 00512 // build_layers_and_connections // 00514 void PseudolikelihoodRBM::build_layers_and_connections() 00515 { 00516 MODULE_LOG << "build_layers_and_connections() called" << endl; 00517 00518 if( !input_layer ) 00519 PLERROR("In PseudolikelihoodRBM::build_layers_and_connections(): " 00520 "input_layer must be provided"); 00521 if( !hidden_layer ) 00522 PLERROR("In PseudolikelihoodRBM::build_layers_and_connections(): " 00523 "hidden_layer must be provided"); 00524 00525 if( targetsize() == 1 ) 00526 { 00527 if( n_classes <= 1 ) 00528 PLERROR("In PseudolikelihoodRBM::build_layers_and_connections(): " 00529 "n_classes should be > 1"); 00530 if( !target_layer || target_layer->size != n_classes ) 00531 { 00532 target_layer = new RBMMultinomialLayer(); 00533 target_layer->size = n_classes; 00534 target_layer->random_gen = random_gen; 00535 target_layer->build(); 00536 target_layer->forget(); 00537 } 00538 00539 if( !target_connection || 00540 target_connection->up_size != hidden_layer->size || 00541 target_connection->down_size != target_layer->size ) 00542 { 00543 target_connection = new RBMMatrixConnection(); 00544 target_connection->up_size = hidden_layer->size; 00545 target_connection->down_size = target_layer->size; 00546 target_connection->random_gen = random_gen; 00547 target_connection->build(); 00548 target_connection->forget(); 00549 } 00550 } 00551 else if ( targetsize() > 1 ) 00552 { 00553 if( !target_layer || target_layer->size != targetsize() ) 00554 { 00555 target_layer = new RBMBinomialLayer(); 00556 target_layer->size = targetsize(); 00557 target_layer->random_gen = random_gen; 00558 target_layer->build(); 00559 target_layer->forget(); 00560 } 00561 00562 if( !target_connection || 00563 target_connection->up_size != hidden_layer->size || 00564 target_connection->down_size != target_layer->size ) 00565 { 00566 target_connection = new RBMMatrixConnection(); 00567 target_connection->up_size = hidden_layer->size; 00568 target_connection->down_size = target_layer->size; 00569 target_connection->random_gen = random_gen; 00570 target_connection->build(); 00571 target_connection->forget(); 00572 } 00573 } 00574 00575 if( !connection && !input_is_sparse ) 00576 PLERROR("PseudolikelihoodRBM::build_layers_and_connections(): \n" 00577 "connection must be provided"); 00578 00579 if( input_is_sparse ) 00580 { 00581 if( factorized_connection_rank > 0 ) 00582 { 00583 U.resize( hidden_layer->size, factorized_connection_rank ); 00584 V.resize( inputsize(), factorized_connection_rank ); 00585 Vx.resize( factorized_connection_rank ); 00586 00587 U_gradient.resize( hidden_layer->size, factorized_connection_rank ); 00588 Vx_gradient.resize( factorized_connection_rank ); 00589 } 00590 else 00591 { 00592 V.resize( inputsize(), hidden_layer->size ); 00593 } 00594 input_is_active.resize( inputsize() ); 00595 input_is_active.clear(); 00596 hidden_act_non_selected.resize( hidden_layer->size ); 00597 // CD option 00598 pos_hidden.resize( hidden_layer->size ); 00599 pos_input_sparse.resize( input_layer->size ); 00600 pos_input_sparse.clear(); 00601 } 00602 else 00603 { 00604 if( connection->up_size != hidden_layer->size || 00605 connection->down_size != input_layer->size ) 00606 PLERROR("PseudolikelihoodRBM::build_layers_and_connections(): \n" 00607 "connection's size (%d x %d) should be %d x %d", 00608 connection->up_size, connection->down_size, 00609 hidden_layer->size, input_layer->size); 00610 connection_gradient.resize( connection->up_size, connection->down_size ); 00611 00612 if( !connection->random_gen ) 00613 { 00614 connection->random_gen = random_gen; 00615 connection->forget(); 00616 } 00617 00618 // CD option 00619 pos_hidden.resize( hidden_layer->size ); 00620 pers_cd_hidden.resize( n_gibbs_chains ); 00621 for( int i=0; i<n_gibbs_chains; i++ ) 00622 { 00623 pers_cd_hidden[i].resize( hidden_layer->size ); 00624 } 00625 if( persistent_gibbs_chain_is_started.length() != n_gibbs_chains ) 00626 { 00627 persistent_gibbs_chain_is_started.resize( n_gibbs_chains ); 00628 persistent_gibbs_chain_is_started.fill( false ); 00629 } 00630 00631 // Denoising autoencoder options 00632 transpose_connection = new RBMMatrixTransposeConnection; 00633 transpose_connection->rbm_matrix_connection = connection; 00634 transpose_connection->build(); 00635 reconstruction_activation_gradient.resize(input_layer->size); 00636 hidden_layer_expectation_gradient.resize(hidden_layer->size); 00637 hidden_layer_activation_gradient.resize(hidden_layer->size); 00638 masked_autoencoder_input.resize(input_layer->size); 00639 autoencoder_input_indices.resize(input_layer->size); 00640 for(int i=0; i<input_layer->size; i++) 00641 autoencoder_input_indices[i] = i; 00642 } 00643 00644 input_gradient.resize( input_layer->size ); 00645 hidden_activation_pos_i.resize( hidden_layer->size ); 00646 hidden_activation_neg_i.resize( hidden_layer->size ); 00647 hidden_activation_gradient.resize( hidden_layer->size ); 00648 hidden_activation_pos_i_gradient.resize( hidden_layer->size ); 00649 hidden_activation_neg_i_gradient.resize( hidden_layer->size ); 00650 00651 00652 // Generalized pseudolikelihood option 00653 context_indices.resize( input_layer->size - 1); 00654 if( pseudolikelihood_context_size > 0 ) 00655 { 00656 context_indices_per_i.resize( input_layer->size, 00657 pseudolikelihood_context_size ); 00658 00659 int n_conf = ipow(2, pseudolikelihood_context_size); 00660 nums_act.resize( 2 * n_conf ); 00661 gnums_act.resize( 2 * n_conf ); 00662 context_probs.resize( 2 * n_conf ); 00663 hidden_activations_context.resize( 2*n_conf, hidden_layer->size ); 00664 hidden_activations_context_k_gradient.resize( hidden_layer->size ); 00665 } 00666 00667 00668 00669 if( inputsize_ >= 0 ) 00670 PLASSERT( input_layer->size == inputsize() ); 00671 00672 if( targetsize() > 0 ) 00673 { 00674 class_output.resize( target_layer->size ); 00675 class_gradient.resize( target_layer->size ); 00676 target_one_hot.resize( target_layer->size ); 00677 00678 pos_target.resize( target_layer->size ); 00679 neg_target.resize( target_layer->size ); 00680 } 00681 00682 if( !input_layer->random_gen ) 00683 { 00684 input_layer->random_gen = random_gen; 00685 input_layer->forget(); 00686 } 00687 00688 if( !hidden_layer->random_gen ) 00689 { 00690 hidden_layer->random_gen = random_gen; 00691 hidden_layer->forget(); 00692 } 00693 } 00694 00696 // build // 00698 void PseudolikelihoodRBM::build() 00699 { 00700 inherited::build(); 00701 build_(); 00702 } 00703 00705 // makeDeepCopyFromShallowCopy // 00707 void PseudolikelihoodRBM::makeDeepCopyFromShallowCopy(CopiesMap& copies) 00708 { 00709 inherited::makeDeepCopyFromShallowCopy(copies); 00710 00711 deepCopyField(input_layer, copies); 00712 deepCopyField(hidden_layer, copies); 00713 deepCopyField(connection, copies); 00714 deepCopyField(cost_names, copies); 00715 deepCopyField(transpose_connection, copies); 00716 deepCopyField(target_layer, copies); 00717 deepCopyField(target_connection, copies); 00718 deepCopyField(U, copies); 00719 deepCopyField(V, copies); 00720 00721 deepCopyField(target_one_hot, copies); 00722 deepCopyField(input_gradient, copies); 00723 deepCopyField(class_output, copies); 00724 deepCopyField(class_gradient, copies); 00725 deepCopyField(hidden_activation_pos_i, copies); 00726 deepCopyField(hidden_activation_neg_i, copies); 00727 deepCopyField(hidden_activation_gradient, copies); 00728 deepCopyField(hidden_activation_pos_i_gradient, copies); 00729 deepCopyField(hidden_activation_neg_i_gradient, copies); 00730 deepCopyField(connection_gradient, copies); 00731 deepCopyField(context_indices, copies); 00732 deepCopyField(context_indices_per_i, copies); 00733 deepCopyField(correlations_per_i, copies); 00734 deepCopyField(context_most_correlated, copies); 00735 deepCopyField(hidden_activations_context, copies); 00736 deepCopyField(hidden_activations_context_k_gradient, copies); 00737 deepCopyField(nums, copies); 00738 deepCopyField(nums_act, copies); 00739 deepCopyField(context_probs, copies); 00740 deepCopyField(gnums_act, copies); 00741 deepCopyField(conf, copies); 00742 deepCopyField(pos_input, copies); 00743 deepCopyField(pos_target, copies); 00744 deepCopyField(pos_hidden, copies); 00745 deepCopyField(neg_input, copies); 00746 deepCopyField(neg_target, copies); 00747 deepCopyField(neg_hidden, copies); 00748 deepCopyField(reconstruction_activation_gradient, copies); 00749 deepCopyField(hidden_layer_expectation_gradient, copies); 00750 deepCopyField(hidden_layer_activation_gradient, copies); 00751 deepCopyField(masked_autoencoder_input, copies); 00752 deepCopyField(autoencoder_input_indices, copies); 00753 deepCopyField(pers_cd_hidden, copies); 00754 deepCopyField(Vx, copies); 00755 deepCopyField(U_gradient, copies); 00756 deepCopyField(Vx_gradient, copies); 00757 deepCopyField(V_gradients, copies); 00758 deepCopyField(input_is_active, copies); 00759 deepCopyField(input_indices, copies); 00760 deepCopyField(input_is_selected, copies); 00761 deepCopyField(hidden_act_non_selected, copies); 00762 deepCopyField(pos_input_sparse, copies); 00763 deepCopyField(persistent_gibbs_chain_is_started, copies); 00764 } 00765 00766 00768 // outputsize // 00770 int PseudolikelihoodRBM::outputsize() const 00771 { 00772 return targetsize() > 0 ? target_layer->size : hidden_layer->size; 00773 } 00774 00776 // forget // 00778 void PseudolikelihoodRBM::forget() 00779 { 00780 inherited::forget(); 00781 00782 input_layer->forget(); 00783 hidden_layer->forget(); 00784 if( connection ) 00785 connection->forget(); 00786 00787 cumulative_training_time = 0; 00788 //cumulative_testing_time = 0; 00789 Z_is_up_to_date = false; 00790 Z_ais_is_up_to_date = false; 00791 00792 persistent_gibbs_chain_is_started.fill( false ); 00793 correlations_per_i.resize(0,0); 00794 00795 if( U.size() != 0 ) 00796 { 00797 real d = 1. / max( U.length(), U.width() ); 00798 random_gen->fill_random_uniform( U, -d, d ); 00799 } 00800 00801 if( V.size() != 0 ) 00802 V.clear(); 00803 00804 if( target_layer ) 00805 target_layer->forget(); 00806 00807 if( target_connection ) 00808 target_connection->forget(); 00809 } 00810 00812 // train // 00814 void PseudolikelihoodRBM::train() 00815 { 00816 MODULE_LOG << "train() called " << endl; 00817 00818 MODULE_LOG << "stage = " << stage 00819 << ", target nstages = " << nstages << endl; 00820 00821 PLASSERT( train_set ); 00822 00823 Vec input( inputsize() ); 00824 Vec target( targetsize() ); 00825 Vec extra( 1 ); 00826 int target_index; 00827 real weight; // unused 00828 real lr; 00829 int weightsize = train_set->weightsize(); 00830 00831 //real mean_pseudolikelihood = 0; 00832 00833 TVec<string> train_cost_names = getTrainCostNames() ; 00834 Vec train_costs( train_cost_names.length() ); 00835 train_costs.fill(MISSING_VALUE) ; 00836 00837 int nsamples = train_set->length(); 00838 int init_stage = stage; 00839 if( !initTrain() ) 00840 { 00841 MODULE_LOG << "train() aborted" << endl; 00842 return; 00843 } 00844 00845 PP<ProgressBar> pb; 00846 00847 // clear stats of previous epoch 00848 train_stats->forget(); 00849 00850 if( report_progress ) 00851 pb = new ProgressBar( "Training " 00852 + classname(), 00853 nstages - stage ); 00854 00855 // Start the actual time counting 00856 Profiler::reset("training"); 00857 Profiler::start("training"); 00858 00859 for( ; stage<nstages ; stage++ ) 00860 { 00861 Z_is_up_to_date = false; 00862 Z_ais_is_up_to_date = false; 00863 train_set->getExample(stage%nsamples, input, target, weight); 00864 00865 if( pb ) 00866 pb->update( stage - init_stage + 1 ); 00867 00868 if( targetsize() == 1 ) 00869 { 00870 target_one_hot.clear(); 00871 if( !is_missing(target[0]) ) 00872 { 00873 target_index = (int)round( target[0] ); 00874 target_one_hot[ target_index ] = 1; 00875 } 00876 } 00877 // else 00878 // { 00879 00880 // Discriminative learning is the sum of all learning rates 00881 lr = 0; 00882 00883 if( !fast_exact_is_equal(decrease_ct, 0) ) 00884 lr += learning_rate / (1.0 + stage * decrease_ct ); 00885 else 00886 lr += learning_rate; 00887 00888 if( !fast_exact_is_equal(cd_decrease_ct, 0) ) 00889 lr += cd_learning_rate / (1.0 + stage * cd_decrease_ct ); 00890 else 00891 lr += cd_learning_rate; 00892 00893 if( !fast_exact_is_equal(denoising_decrease_ct, 0) ) 00894 lr += denoising_learning_rate / (1.0 + stage * denoising_decrease_ct ); 00895 else 00896 lr += denoising_learning_rate; 00897 00898 if( weightsize > 0 ) 00899 lr *= weight; 00900 00901 setLearningRate(lr); 00902 00903 if( targetsize() == 1 && !is_missing(target[0]) ) 00904 { 00905 Vec target_act = target_layer->activation; 00906 Vec hidden_act = hidden_layer->activation; 00907 00908 // For gradient verification 00909 //Mat estimated_gradient(connection->up_size, connection->down_size); 00910 //{ 00911 // connection->setAsDownInput( input ); 00912 // hidden_layer->getAllActivations( 00913 // (RBMMatrixConnection*) connection ); 00914 // 00915 // target_act = target_layer->activation; 00916 // hidden_act = hidden_layer->activation; 00917 // for( int i=0 ; i<target_layer->size ; i++ ) 00918 // { 00919 // target_act[i] = target_layer->bias[i]; 00920 // // LATERAL CONNECTIONS CODE HERE!! 00921 // real *w = &(target_connection->weights(0,i)); 00922 // // step from one row to the next in weights matrix 00923 // int m = target_connection->weights.mod(); 00924 // 00925 // for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 00926 // { 00927 // // *w = weights(j,i) 00928 // hidden_activation_pos_i[j] = hidden_act[j] + *w; 00929 // } 00930 // target_act[i] -= hidden_layer->freeEnergyContribution( 00931 // hidden_activation_pos_i); 00932 // } 00933 // 00934 // target_layer->expectation_is_up_to_date = false; 00935 // target_layer->computeExpectation(); 00936 // real true_nll = target_layer->fpropNLL(target_one_hot); 00937 // 00938 // estimated_gradient.fill(true_nll); 00939 // 00940 // real epsilon = 1e-5; 00941 // for( int i1=0; i1<connection->up_size; i1++) 00942 // for( int j1=0; j1<connection->down_size; j1++) 00943 // { 00944 // connection->weights(i1,j1) += epsilon; 00945 // connection->setAsDownInput( input ); 00946 // hidden_layer->getAllActivations( 00947 // (RBMMatrixConnection*) connection ); 00948 // 00949 // Vec target_act = target_layer->activation; 00950 // Vec hidden_act = hidden_layer->activation; 00951 // for( int i=0 ; i<target_layer->size ; i++ ) 00952 // { 00953 // target_act[i] = target_layer->bias[i]; 00954 // // LATERAL CONNECTIONS CODE HERE!! 00955 // real *w = &(target_connection->weights(0,i)); 00956 // // step from one row to the next in weights matrix 00957 // int m = target_connection->weights.mod(); 00958 // 00959 // for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 00960 // { 00961 // // *w = weights(j,i) 00962 // hidden_activation_pos_i[j] = hidden_act[j] + *w; 00963 // } 00964 // target_act[i] -= hidden_layer->freeEnergyContribution( 00965 // hidden_activation_pos_i); 00966 // } 00967 // 00968 // target_layer->expectation_is_up_to_date = false; 00969 // target_layer->computeExpectation(); 00970 // real nll = target_layer->fpropNLL(target_one_hot); 00971 // 00972 // estimated_gradient(i1,j1) = (nll - estimated_gradient(i1,j1) )/epsilon; 00973 // connection->weights(i1,j1) -= epsilon; 00974 // } 00975 //} 00976 00977 // For gradient verification of target connections 00978 //Mat estimated_target_gradient(target_connection->up_size, target_connection->down_size); 00979 //{ 00980 // connection->setAsDownInput( input ); 00981 // hidden_layer->getAllActivations( 00982 // (RBMMatrixConnection*) connection ); 00983 // 00984 // target_act = target_layer->activation; 00985 // hidden_act = hidden_layer->activation; 00986 // for( int i=0 ; i<target_layer->size ; i++ ) 00987 // { 00988 // target_act[i] = target_layer->bias[i]; 00989 // // LATERAL CONNECTIONS CODE HERE!! 00990 // real *w = &(target_connection->weights(0,i)); 00991 // // step from one row to the next in weights matrix 00992 // int m = target_connection->weights.mod(); 00993 // 00994 // for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 00995 // { 00996 // // *w = weights(j,i) 00997 // hidden_activation_pos_i[j] = hidden_act[j] + *w; 00998 // } 00999 // target_act[i] -= hidden_layer->freeEnergyContribution( 01000 // hidden_activation_pos_i); 01001 // } 01002 // 01003 // target_layer->expectation_is_up_to_date = false; 01004 // target_layer->computeExpectation(); 01005 // real true_nll = target_layer->fpropNLL(target_one_hot); 01006 // 01007 // estimated_target_gradient.fill(true_nll); 01008 // 01009 // real epsilon = 1e-5; 01010 // for( int i1=0; i1<target_connection->up_size; i1++) 01011 // for( int j1=0; j1<target_connection->down_size; j1++) 01012 // { 01013 // target_connection->weights(i1,j1) += epsilon; 01014 // connection->setAsDownInput( input ); 01015 // hidden_layer->getAllActivations( 01016 // (RBMMatrixConnection*) connection ); 01017 // 01018 // Vec target_act = target_layer->activation; 01019 // Vec hidden_act = hidden_layer->activation; 01020 // for( int i=0 ; i<target_layer->size ; i++ ) 01021 // { 01022 // target_act[i] = target_layer->bias[i]; 01023 // // LATERAL CONNECTIONS CODE HERE!! 01024 // real *w = &(target_connection->weights(0,i)); 01025 // // step from one row to the next in weights matrix 01026 // int m = target_connection->weights.mod(); 01027 // 01028 // for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 01029 // { 01030 // // *w = weights(j,i) 01031 // hidden_activation_pos_i[j] = hidden_act[j] + *w; 01032 // } 01033 // target_act[i] -= hidden_layer->freeEnergyContribution( 01034 // hidden_activation_pos_i); 01035 // } 01036 // 01037 // target_layer->expectation_is_up_to_date = false; 01038 // target_layer->computeExpectation(); 01039 // real nll = target_layer->fpropNLL(target_one_hot); 01040 // 01041 // estimated_target_gradient(i1,j1) = (nll - estimated_target_gradient(i1,j1) )/epsilon; 01042 // target_connection->weights(i1,j1) -= epsilon; 01043 // } 01044 //} 01045 01046 // Multi-class classification 01047 01048 if( input_is_sparse ) 01049 { 01050 if( factorized_connection_rank > 0 ) 01051 { 01052 Vx.clear(); 01053 train_set->getExtra(stage%nsamples,extra); 01054 input_is_active.clear(); 01055 for( int i=0; i<extra.length(); i++ ) 01056 { 01057 Vx += V((int)extra[i]); 01058 input_is_active[(int)extra[i]] = true; 01059 } 01060 01061 product(hidden_act,U,Vx); 01062 } 01063 else 01064 { 01065 hidden_act.clear(); 01066 train_set->getExtra(stage%nsamples,extra); 01067 for( int i=0; i<extra.length(); i++ ) 01068 { 01069 hidden_act += V((int)extra[i]); 01070 input_is_active[(int)extra[i]] = true; 01071 } 01072 } 01073 hidden_act += hidden_layer->bias; 01074 } 01075 else 01076 { 01077 connection->setAsDownInput( input ); 01078 hidden_layer->getAllActivations( 01079 (RBMMatrixConnection*) connection ); 01080 } 01081 01082 for( int i=0 ; i<target_layer->size ; i++ ) 01083 { 01084 target_act[i] = target_layer->bias[i]; 01085 // LATERAL CONNECTIONS CODE HERE!! 01086 real *w = &(target_connection->weights(0,i)); 01087 // step from one row to the next in weights matrix 01088 int m = target_connection->weights.mod(); 01089 01090 for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 01091 { 01092 // *w = weights(j,i) 01093 hidden_activation_pos_i[j] = hidden_act[j] + *w; 01094 } 01095 target_act[i] -= hidden_layer->freeEnergyContribution( 01096 hidden_activation_pos_i); 01097 } 01098 01099 target_layer->expectation_is_up_to_date = false; 01100 target_layer->computeExpectation(); 01101 real nll = target_layer->fpropNLL(target_one_hot); 01102 train_costs[nll_cost_index] = nll; 01103 train_costs[class_cost_index] = 01104 (argmax(target_layer->expectation) == target_index)? 0 : 1; 01105 target_layer->bpropNLL(target_one_hot,nll,class_gradient); 01106 01107 hidden_activation_gradient.clear(); 01108 01109 //Mat target_real_gradient(target_connection->up_size, target_connection->down_size); 01110 for( int i=0 ; i<target_layer->size ; i++ ) 01111 { 01112 real *w = &(target_connection->weights(0,i)); 01113 // step from one row to the next in weights matrix 01114 int m = target_connection->weights.mod(); 01115 01116 for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 01117 { 01118 // *w = weights(j,i) 01119 hidden_activation_pos_i[j] = hidden_act[j] + *w; 01120 } 01121 hidden_layer->freeEnergyContributionGradient( 01122 hidden_activation_pos_i, 01123 hidden_activation_pos_i_gradient, 01124 -class_gradient[i], 01125 false 01126 ); 01127 hidden_activation_gradient += hidden_activation_pos_i_gradient; 01128 01129 // Update target connections 01130 w = &(target_connection->weights(0,i)); 01131 //real* gw = &(target_real_gradient(0,i)); 01132 //int gm = target_real_gradient.mod(); 01133 for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 01134 { 01135 *w -= lr * hidden_activation_pos_i_gradient[j]; 01136 //*gw += hidden_activation_pos_i_gradient[j]; 01137 //gw += gm; 01138 } 01139 01140 } 01141 01142 //real cos_ang = dot(connection_gradient.toVec(),estimated_gradient.toVec()) 01143 // / (norm(connection_gradient.toVec()) *norm(estimated_gradient.toVec())); 01144 //cout << "cos_ang=" << cos_ang << endl; 01145 //cout << "ang=" << acos(cos_ang) << endl; 01146 01147 //real cos_target_ang = dot(target_real_gradient.toVec(),estimated_target_gradient.toVec()) 01148 // / (norm(target_real_gradient.toVec()) *norm(estimated_target_gradient.toVec())); 01149 //cout << "cos_target_ang=" << cos_target_ang << endl; 01150 //cout << "target_ang=" << acos(cos_target_ang) << endl; 01151 01152 // Update target bias 01153 multiplyScaledAdd(class_gradient, 1.0, -lr, 01154 target_layer->bias); 01155 // Hidden bias update 01156 multiplyScaledAdd(hidden_activation_gradient, 1.0, -lr, 01157 hidden_layer->bias); 01158 01159 if( input_is_sparse ) 01160 { 01161 if( factorized_connection_rank > 0 ) 01162 { 01163 externalProduct( U_gradient, hidden_activation_gradient, 01164 Vx ); 01165 transposeProduct( Vx_gradient, U, hidden_activation_gradient ); 01166 for( int i=0; i<extra.length(); i++ ) 01167 { 01168 V((int)extra[i]) -= lr * Vx_gradient; 01169 input_is_active[(int)extra[i]] = false; 01170 } 01171 01172 multiplyScaledAdd( U_gradient, 1.0, -lr, 01173 U ); 01174 } 01175 else 01176 { 01177 for( int i=0; i<extra.length(); i++ ) 01178 { 01179 V((int)extra[i]) -= lr * hidden_activation_gradient; 01180 input_is_active[(int)extra[i]] = false; 01181 } 01182 } 01183 01184 } 01185 else 01186 { 01187 externalProduct( connection_gradient, hidden_activation_gradient, 01188 input ); 01189 01190 // Connection weights update 01191 multiplyScaledAdd( connection_gradient, 1.0, -lr, 01192 connection->weights ); 01193 } 01194 } 01195 if( targetsize() > 1 ) 01196 { 01197 // Multi-task binary classification 01198 PLERROR("NNNNNNNNNNOOOOOOOOOOOOOOOOOOOOOO!!!!!!!!!!!!!!"); 01199 } 01200 01201 if( !fast_exact_is_equal(sparsity_bias_decay, 0.) ) 01202 { 01203 Vec b = hidden_layer->bias; 01204 for( int i=0 ; i<hidden_layer->size ; i++ ) 01205 b[i] -= lr * sparsity_bias_decay; 01206 } 01207 01208 if( !fast_exact_is_equal(learning_rate, 0.) && 01209 (targetsize() == 0 || generative_learning_weight > 0) ) 01210 { 01211 if( !fast_exact_is_equal(decrease_ct, 0) ) 01212 lr = learning_rate / (1.0 + stage * decrease_ct ); 01213 else 01214 lr = learning_rate; 01215 01216 if( targetsize() > 0 ) 01217 lr *= generative_learning_weight; 01218 01219 if( weightsize > 0 ) 01220 lr *= weight; 01221 01222 setLearningRate(lr); 01223 01224 if( is_missing(target[0]) ) 01225 PLERROR("In PseudolikelihoodRBM::train(): generative training with " 01226 "unlabeled examples not supported for pseudolikehood training."); 01227 01228 if( pseudolikelihood_context_size == 0 ) 01229 { 01230 // Compute input_probs 01231 // 01232 // a = W x + c 01233 // for i in 1...d 01234 // num_pos = b_i 01235 // num_neg = 0 01236 // for j in 1...h 01237 // num_pos += softplus( a_j - W_ji x_i + W_ji) 01238 // num_neg += softplus( a_j - W_ji x_i) 01239 // p_i = exp(num_pos) / (exp(num_pos) + exp(num_neg)) 01240 01241 Vec hidden_act = hidden_layer->activation; 01242 01243 real num_pos_act; 01244 real num_neg_act; 01245 real num_pos; 01246 real num_neg; 01247 real* a = hidden_layer->activation.data(); 01248 real* a_pos_i = hidden_activation_pos_i.data(); 01249 real* a_neg_i = hidden_activation_neg_i.data(); 01250 real* w, *gw; 01251 int m; 01252 if( connection ) 01253 m = connection->weights.mod(); 01254 real input_i, input_probs_i; 01255 real pseudolikelihood = 0; 01256 real* ga_pos_i = hidden_activation_pos_i_gradient.data(); 01257 real* ga_neg_i = hidden_activation_neg_i_gradient.data(); 01258 01259 // Randomly select inputs 01260 if( n_selected_inputs_pseudolikelihood <= inputsize() && 01261 n_selected_inputs_pseudolikelihood > 0 ) 01262 { 01263 if ( input_indices.length() == 0 ) 01264 { 01265 input_indices.resize(inputsize()); 01266 for( int i=0; i<input_indices.length(); i++ ) 01267 input_indices[i] = i; 01268 01269 } 01270 01271 // Randomly selected inputs 01272 int tmp; 01273 int k; 01274 for (int j = 0; j < n_selected_inputs_pseudolikelihood; j++) 01275 { 01276 k = j + 01277 random_gen->uniform_multinomial_sample( 01278 inputsize() - j); 01279 01280 tmp = input_indices[j]; 01281 input_indices[j] = input_indices[k]; 01282 input_indices[k] = tmp; 01283 } 01284 } 01285 01286 // Resize V_gradients 01287 if( input_is_sparse ) 01288 { 01289 int n_V_gradients; 01290 if( n_selected_inputs_pseudolikelihood <= inputsize() && 01291 n_selected_inputs_pseudolikelihood > 0 ) 01292 n_V_gradients = n_selected_inputs_pseudolikelihood; 01293 else 01294 n_V_gradients = inputsize(); 01295 01296 if( factorized_connection_rank > 0 ) 01297 V_gradients.resize( 01298 n_V_gradients, 01299 factorized_connection_rank ); 01300 else 01301 V_gradients.resize( 01302 n_V_gradients, 01303 hidden_layer->size ); 01304 } 01305 01306 //Mat estimated_gradient; 01307 //Mat U_estimated_gradient; 01308 //{ 01309 // real epsilon=1e-5; 01310 // // Empirically estimate gradient 01311 // if( input_is_sparse ) 01312 // { 01313 // estimated_gradient.resize(V.length(), V.width()); 01314 // U_estimated_gradient.resize(U.length(), U.width() ); 01315 // 01316 // int i=0; 01317 // pseudolikelihood = 0; 01318 // 01319 // // Compute activations 01320 // if( input_is_sparse ) 01321 // { 01322 // if( factorized_connection_rank > 0 ) 01323 // { 01324 // Vx.clear(); 01325 // train_set->getExtra(stage%nsamples,extra); 01326 // for( int i=0; i<extra.length(); i++ ) 01327 // { 01328 // Vx += V((int)extra[i]); 01329 // input_is_active[(int)extra[i]] = true; 01330 // } 01331 // 01332 // product(hidden_act,U,Vx); 01333 // } 01334 // else 01335 // { 01336 // hidden_act.clear(); 01337 // train_set->getExtra(stage%nsamples,extra); 01338 // for( int i=0; i<extra.length(); i++ ) 01339 // { 01340 // hidden_act += V((int)extra[i]); 01341 // input_is_active[(int)extra[i]] = true; 01342 // } 01343 // } 01344 // hidden_act += hidden_layer->bias; 01345 // } 01346 // else 01347 // { 01348 // connection->setAsDownInput( input ); 01349 // hidden_layer->getAllActivations( 01350 // (RBMMatrixConnection*) connection ); 01351 // } 01352 // 01353 // if( targetsize() == 1 ) 01354 // productAcc( hidden_layer->activation, 01355 // target_connection->weights, 01356 // target_one_hot ); 01357 // else if( targetsize() > 1 ) 01358 // productAcc( hidden_layer->activation, 01359 // target_connection->weights, 01360 // target ); 01361 // 01362 // for( int l=0; l<input_layer->size ; l++ ) 01363 // { 01364 // if( n_selected_inputs_pseudolikelihood <= inputsize() && 01365 // n_selected_inputs_pseudolikelihood > 0 ) 01366 // { 01367 // if( l >= n_selected_inputs_pseudolikelihood ) 01368 // break; 01369 // i = input_indices[l]; 01370 // } 01371 // else 01372 // i = l; 01373 // 01374 // num_pos_act = input_layer->bias[i]; 01375 // // LATERAL CONNECTIONS CODE HERE! 01376 // num_neg_act = 0; 01377 // if( input_is_sparse ) 01378 // { 01379 // hidden_activation_pos_i << hidden_act; 01380 // hidden_activation_neg_i << hidden_act; 01381 // if( factorized_connection_rank > 0 ) 01382 // if( input_is_active[i] ) 01383 // { 01384 // input_i = 1; 01385 // productScaleAcc( hidden_activation_neg_i, 01386 // U, V(i), -1.,1.); 01387 // } 01388 // else 01389 // { 01390 // input_i = 0; 01391 // productScaleAcc( hidden_activation_pos_i, 01392 // U, V(i), 1.,1.); 01393 // } 01394 // else 01395 // if( input_is_active[i] ) 01396 // { 01397 // input_i = 1; 01398 // hidden_activation_neg_i -= V(i); 01399 // } 01400 // else 01401 // { 01402 // input_i = 0; 01403 // hidden_activation_pos_i += V(i); 01404 // } 01405 // } 01406 // else 01407 // { 01408 // w = &(connection->weights(0,i)); 01409 // input_i = input[i]; 01410 // for( int j=0; j<hidden_layer->size; j++,w+=m ) 01411 // { 01412 // a_pos_i[j] = a[j] - *w * ( input_i - 1 ); 01413 // a_neg_i[j] = a[j] - *w * input_i; 01414 // } 01415 // } 01416 // num_pos_act -= hidden_layer->freeEnergyContribution( 01417 // hidden_activation_pos_i); 01418 // num_neg_act -= hidden_layer->freeEnergyContribution( 01419 // hidden_activation_neg_i); 01420 // //num_pos = safeexp(num_pos_act); 01421 // //num_neg = safeexp(num_neg_act); 01422 // //input_probs_i = num_pos / (num_pos + num_neg); 01423 // if( input_layer->use_fast_approximations ) 01424 // input_probs_i = fastsigmoid( 01425 // num_pos_act - num_neg_act); 01426 // else 01427 // { 01428 // num_pos = safeexp(num_pos_act); 01429 // num_neg = safeexp(num_neg_act); 01430 // input_probs_i = num_pos / (num_pos + num_neg); 01431 // } 01432 // if( input_layer->use_fast_approximations ) 01433 // pseudolikelihood += tabulated_softplus( 01434 // num_pos_act - num_neg_act ) 01435 // - input_i * (num_pos_act - num_neg_act); 01436 // else 01437 // pseudolikelihood += softplus( 01438 // num_pos_act - num_neg_act ) 01439 // - input_i * (num_pos_act - num_neg_act); 01440 // 01441 // } 01442 // 01443 // estimated_gradient.fill(pseudolikelihood); 01444 // 01445 // for( int i1=0; i1<estimated_gradient.length(); i1++) 01446 // for( int j1=0; j1<estimated_gradient.width(); j1++) 01447 // { 01448 // V(i1,j1) += epsilon; 01449 // pseudolikelihood = 0; 01450 // 01451 // // Compute activations 01452 // if( input_is_sparse ) 01453 // { 01454 // if( factorized_connection_rank > 0 ) 01455 // { 01456 // Vx.clear(); 01457 // train_set->getExtra(stage%nsamples,extra); 01458 // for( int i=0; i<extra.length(); i++ ) 01459 // { 01460 // Vx += V((int)extra[i]); 01461 // input_is_active[(int)extra[i]] = true; 01462 // } 01463 // 01464 // product(hidden_act,U,Vx); 01465 // } 01466 // else 01467 // { 01468 // hidden_act.clear(); 01469 // train_set->getExtra(stage%nsamples,extra); 01470 // for( int i=0; i<extra.length(); i++ ) 01471 // { 01472 // hidden_act += V((int)extra[i]); 01473 // input_is_active[(int)extra[i]] = true; 01474 // } 01475 // } 01476 // hidden_act += hidden_layer->bias; 01477 // } 01478 // else 01479 // { 01480 // connection->setAsDownInput( input ); 01481 // hidden_layer->getAllActivations( 01482 // (RBMMatrixConnection*) connection ); 01483 // } 01484 // 01485 // if( targetsize() == 1 ) 01486 // productAcc( hidden_layer->activation, 01487 // target_connection->weights, 01488 // target_one_hot ); 01489 // else if( targetsize() > 1 ) 01490 // productAcc( hidden_layer->activation, 01491 // target_connection->weights, 01492 // target ); 01493 // 01494 // for( int l=0; l<input_layer->size ; l++ ) 01495 // { 01496 // if( n_selected_inputs_pseudolikelihood <= inputsize() && 01497 // n_selected_inputs_pseudolikelihood > 0 ) 01498 // { 01499 // if( l >= n_selected_inputs_pseudolikelihood ) 01500 // break; 01501 // i = input_indices[l]; 01502 // } 01503 // else 01504 // i = l; 01505 // 01506 // num_pos_act = input_layer->bias[i]; 01507 // // LATERAL CONNECTIONS CODE HERE! 01508 // num_neg_act = 0; 01509 // if( input_is_sparse ) 01510 // { 01511 // hidden_activation_pos_i << hidden_act; 01512 // hidden_activation_neg_i << hidden_act; 01513 // if( factorized_connection_rank > 0 ) 01514 // if( input_is_active[i] ) 01515 // { 01516 // input_i = 1; 01517 // productScaleAcc( hidden_activation_neg_i, 01518 // U, V(i), -1.,1.); 01519 // } 01520 // else 01521 // { 01522 // input_i = 0; 01523 // productScaleAcc( hidden_activation_pos_i, 01524 // U, V(i), 1.,1.); 01525 // } 01526 // else 01527 // if( input_is_active[i] ) 01528 // { 01529 // input_i = 1; 01530 // hidden_activation_neg_i -= V(i); 01531 // } 01532 // else 01533 // { 01534 // input_i = 0; 01535 // hidden_activation_pos_i += V(i); 01536 // } 01537 // } 01538 // else 01539 // { 01540 // w = &(connection->weights(0,i)); 01541 // input_i = input[i]; 01542 // for( int j=0; j<hidden_layer->size; j++,w+=m ) 01543 // { 01544 // a_pos_i[j] = a[j] - *w * ( input_i - 1 ); 01545 // a_neg_i[j] = a[j] - *w * input_i; 01546 // } 01547 // } 01548 // num_pos_act -= hidden_layer->freeEnergyContribution( 01549 // hidden_activation_pos_i); 01550 // num_neg_act -= hidden_layer->freeEnergyContribution( 01551 // hidden_activation_neg_i); 01552 // //num_pos = safeexp(num_pos_act); 01553 // //num_neg = safeexp(num_neg_act); 01554 // //input_probs_i = num_pos / (num_pos + num_neg); 01555 // if( input_layer->use_fast_approximations ) 01556 // input_probs_i = fastsigmoid( 01557 // num_pos_act - num_neg_act); 01558 // else 01559 // { 01560 // num_pos = safeexp(num_pos_act); 01561 // num_neg = safeexp(num_neg_act); 01562 // input_probs_i = num_pos / (num_pos + num_neg); 01563 // } 01564 // if( input_layer->use_fast_approximations ) 01565 // pseudolikelihood += tabulated_softplus( 01566 // num_pos_act - num_neg_act ) 01567 // - input_i * (num_pos_act - num_neg_act); 01568 // else 01569 // pseudolikelihood += softplus( 01570 // num_pos_act - num_neg_act ) 01571 // - input_i * (num_pos_act - num_neg_act); 01572 // 01573 // } 01574 // V(i1,j1) -= epsilon; 01575 // estimated_gradient(i1,j1) = (pseudolikelihood - estimated_gradient(i1,j1)) 01576 // / epsilon; 01577 // } 01578 // 01579 // if( factorized_connection_rank > 0 ) 01580 // { 01581 // 01582 // pseudolikelihood = 0; 01583 // 01584 // // Compute activations 01585 // if( input_is_sparse ) 01586 // { 01587 // if( factorized_connection_rank > 0 ) 01588 // { 01589 // Vx.clear(); 01590 // train_set->getExtra(stage%nsamples,extra); 01591 // for( int i=0; i<extra.length(); i++ ) 01592 // { 01593 // Vx += V((int)extra[i]); 01594 // input_is_active[(int)extra[i]] = true; 01595 // } 01596 // 01597 // product(hidden_act,U,Vx); 01598 // } 01599 // else 01600 // { 01601 // hidden_act.clear(); 01602 // train_set->getExtra(stage%nsamples,extra); 01603 // for( int i=0; i<extra.length(); i++ ) 01604 // { 01605 // hidden_act += V((int)extra[i]); 01606 // input_is_active[(int)extra[i]] = true; 01607 // } 01608 // } 01609 // hidden_act += hidden_layer->bias; 01610 // } 01611 // else 01612 // { 01613 // connection->setAsDownInput( input ); 01614 // hidden_layer->getAllActivations( 01615 // (RBMMatrixConnection*) connection ); 01616 // } 01617 // 01618 // if( targetsize() == 1 ) 01619 // productAcc( hidden_layer->activation, 01620 // target_connection->weights, 01621 // target_one_hot ); 01622 // else if( targetsize() > 1 ) 01623 // productAcc( hidden_layer->activation, 01624 // target_connection->weights, 01625 // target ); 01626 // 01627 // for( int l=0; l<input_layer->size ; l++ ) 01628 // { 01629 // if( n_selected_inputs_pseudolikelihood <= inputsize() && 01630 // n_selected_inputs_pseudolikelihood > 0 ) 01631 // { 01632 // if( l >= n_selected_inputs_pseudolikelihood ) 01633 // break; 01634 // i = input_indices[l]; 01635 // } 01636 // else 01637 // i = l; 01638 // 01639 // num_pos_act = input_layer->bias[i]; 01640 // // LATERAL CONNECTIONS CODE HERE! 01641 // num_neg_act = 0; 01642 // if( input_is_sparse ) 01643 // { 01644 // hidden_activation_pos_i << hidden_act; 01645 // hidden_activation_neg_i << hidden_act; 01646 // if( factorized_connection_rank > 0 ) 01647 // if( input_is_active[i] ) 01648 // { 01649 // input_i = 1; 01650 // productScaleAcc( hidden_activation_neg_i, 01651 // U, V(i), -1.,1.); 01652 // } 01653 // else 01654 // { 01655 // input_i = 0; 01656 // productScaleAcc( hidden_activation_pos_i, 01657 // U, V(i), 1.,1.); 01658 // } 01659 // else 01660 // if( input_is_active[i] ) 01661 // { 01662 // input_i = 1; 01663 // hidden_activation_neg_i -= V(i); 01664 // } 01665 // else 01666 // { 01667 // input_i = 0; 01668 // hidden_activation_pos_i += V(i); 01669 // } 01670 // } 01671 // else 01672 // { 01673 // w = &(connection->weights(0,i)); 01674 // input_i = input[i]; 01675 // for( int j=0; j<hidden_layer->size; j++,w+=m ) 01676 // { 01677 // a_pos_i[j] = a[j] - *w * ( input_i - 1 ); 01678 // a_neg_i[j] = a[j] - *w * input_i; 01679 // } 01680 // } 01681 // num_pos_act -= hidden_layer->freeEnergyContribution( 01682 // hidden_activation_pos_i); 01683 // num_neg_act -= hidden_layer->freeEnergyContribution( 01684 // hidden_activation_neg_i); 01685 // //num_pos = safeexp(num_pos_act); 01686 // //num_neg = safeexp(num_neg_act); 01687 // //input_probs_i = num_pos / (num_pos + num_neg); 01688 // if( input_layer->use_fast_approximations ) 01689 // input_probs_i = fastsigmoid( 01690 // num_pos_act - num_neg_act); 01691 // else 01692 // { 01693 // num_pos = safeexp(num_pos_act); 01694 // num_neg = safeexp(num_neg_act); 01695 // input_probs_i = num_pos / (num_pos + num_neg); 01696 // } 01697 // if( input_layer->use_fast_approximations ) 01698 // pseudolikelihood += tabulated_softplus( 01699 // num_pos_act - num_neg_act ) 01700 // - input_i * (num_pos_act - num_neg_act); 01701 // else 01702 // pseudolikelihood += softplus( 01703 // num_pos_act - num_neg_act ) 01704 // - input_i * (num_pos_act - num_neg_act); 01705 // 01706 // } 01707 // 01708 // U_estimated_gradient.fill(pseudolikelihood); 01709 // 01710 // for( int i1=0; i1<U_estimated_gradient.length(); i1++) 01711 // for( int j1=0; j1<U_estimated_gradient.width(); j1++) 01712 // { 01713 // U(i1,j1) += epsilon; 01714 // pseudolikelihood = 0; 01715 // 01716 // // Compute activations 01717 // if( input_is_sparse ) 01718 // { 01719 // if( factorized_connection_rank > 0 ) 01720 // { 01721 // Vx.clear(); 01722 // train_set->getExtra(stage%nsamples,extra); 01723 // for( int i=0; i<extra.length(); i++ ) 01724 // { 01725 // Vx += V((int)extra[i]); 01726 // input_is_active[(int)extra[i]] = true; 01727 // } 01728 // 01729 // product(hidden_act,U,Vx); 01730 // } 01731 // else 01732 // { 01733 // hidden_act.clear(); 01734 // train_set->getExtra(stage%nsamples,extra); 01735 // for( int i=0; i<extra.length(); i++ ) 01736 // { 01737 // hidden_act += V((int)extra[i]); 01738 // input_is_active[(int)extra[i]] = true; 01739 // } 01740 // } 01741 // hidden_act += hidden_layer->bias; 01742 // } 01743 // else 01744 // { 01745 // connection->setAsDownInput( input ); 01746 // hidden_layer->getAllActivations( 01747 // (RBMMatrixConnection*) connection ); 01748 // } 01749 // 01750 // if( targetsize() == 1 ) 01751 // productAcc( hidden_layer->activation, 01752 // target_connection->weights, 01753 // target_one_hot ); 01754 // else if( targetsize() > 1 ) 01755 // productAcc( hidden_layer->activation, 01756 // target_connection->weights, 01757 // target ); 01758 // 01759 // for( int l=0; l<input_layer->size ; l++ ) 01760 // { 01761 // if( n_selected_inputs_pseudolikelihood <= inputsize() && 01762 // n_selected_inputs_pseudolikelihood > 0 ) 01763 // { 01764 // if( l >= n_selected_inputs_pseudolikelihood ) 01765 // break; 01766 // i = input_indices[l]; 01767 // } 01768 // else 01769 // i = l; 01770 // 01771 // num_pos_act = input_layer->bias[i]; 01772 // // LATERAL CONNECTIONS CODE HERE! 01773 // num_neg_act = 0; 01774 // if( input_is_sparse ) 01775 // { 01776 // hidden_activation_pos_i << hidden_act; 01777 // hidden_activation_neg_i << hidden_act; 01778 // if( factorized_connection_rank > 0 ) 01779 // if( input_is_active[i] ) 01780 // { 01781 // input_i = 1; 01782 // productScaleAcc( hidden_activation_neg_i, 01783 // U, V(i), -1.,1.); 01784 // } 01785 // else 01786 // { 01787 // input_i = 0; 01788 // productScaleAcc( hidden_activation_pos_i, 01789 // U, V(i), 1.,1.); 01790 // } 01791 // else 01792 // if( input_is_active[i] ) 01793 // { 01794 // input_i = 1; 01795 // hidden_activation_neg_i -= V(i); 01796 // } 01797 // else 01798 // { 01799 // input_i = 0; 01800 // hidden_activation_pos_i += V(i); 01801 // } 01802 // } 01803 // else 01804 // { 01805 // w = &(connection->weights(0,i)); 01806 // input_i = input[i]; 01807 // for( int j=0; j<hidden_layer->size; j++,w+=m ) 01808 // { 01809 // a_pos_i[j] = a[j] - *w * ( input_i - 1 ); 01810 // a_neg_i[j] = a[j] - *w * input_i; 01811 // } 01812 // } 01813 // num_pos_act -= hidden_layer->freeEnergyContribution( 01814 // hidden_activation_pos_i); 01815 // num_neg_act -= hidden_layer->freeEnergyContribution( 01816 // hidden_activation_neg_i); 01817 // //num_pos = safeexp(num_pos_act); 01818 // //num_neg = safeexp(num_neg_act); 01819 // //input_probs_i = num_pos / (num_pos + num_neg); 01820 // if( input_layer->use_fast_approximations ) 01821 // input_probs_i = fastsigmoid( 01822 // num_pos_act - num_neg_act); 01823 // else 01824 // { 01825 // num_pos = safeexp(num_pos_act); 01826 // num_neg = safeexp(num_neg_act); 01827 // input_probs_i = num_pos / (num_pos + num_neg); 01828 // } 01829 // if( input_layer->use_fast_approximations ) 01830 // pseudolikelihood += tabulated_softplus( 01831 // num_pos_act - num_neg_act ) 01832 // - input_i * (num_pos_act - num_neg_act); 01833 // else 01834 // pseudolikelihood += softplus( 01835 // num_pos_act - num_neg_act ) 01836 // - input_i * (num_pos_act - num_neg_act); 01837 // 01838 // } 01839 // U(i1,j1) -= epsilon; 01840 // U_estimated_gradient(i1,j1) = (pseudolikelihood - U_estimated_gradient(i1,j1)) 01841 // / epsilon; 01842 // } 01843 // 01844 // 01845 // } 01846 // } 01847 // else 01848 // { 01849 // estimated_gradient.resize(connection->up_size, connection->down_size); 01850 // 01851 // int i=0; 01852 // pseudolikelihood = 0; 01853 // 01854 // // Compute activations 01855 // if( input_is_sparse ) 01856 // { 01857 // if( factorized_connection_rank > 0 ) 01858 // { 01859 // Vx.clear(); 01860 // train_set->getExtra(stage%nsamples,extra); 01861 // for( int i=0; i<extra.length(); i++ ) 01862 // { 01863 // Vx += V((int)extra[i]); 01864 // input_is_active[(int)extra[i]] = true; 01865 // } 01866 // 01867 // product(hidden_act,U,Vx); 01868 // } 01869 // else 01870 // { 01871 // hidden_act.clear(); 01872 // train_set->getExtra(stage%nsamples,extra); 01873 // for( int i=0; i<extra.length(); i++ ) 01874 // { 01875 // hidden_act += V((int)extra[i]); 01876 // input_is_active[(int)extra[i]] = true; 01877 // } 01878 // } 01879 // hidden_act += hidden_layer->bias; 01880 // } 01881 // else 01882 // { 01883 // connection->setAsDownInput( input ); 01884 // hidden_layer->getAllActivations( 01885 // (RBMMatrixConnection*) connection ); 01886 // } 01887 // 01888 // if( targetsize() == 1 ) 01889 // productAcc( hidden_layer->activation, 01890 // target_connection->weights, 01891 // target_one_hot ); 01892 // else if( targetsize() > 1 ) 01893 // productAcc( hidden_layer->activation, 01894 // target_connection->weights, 01895 // target ); 01896 // 01897 // for( int l=0; l<input_layer->size ; l++ ) 01898 // { 01899 // if( n_selected_inputs_pseudolikelihood <= inputsize() && 01900 // n_selected_inputs_pseudolikelihood > 0 ) 01901 // { 01902 // if( l >= n_selected_inputs_pseudolikelihood ) 01903 // break; 01904 // i = input_indices[l]; 01905 // } 01906 // else 01907 // i = l; 01908 // 01909 // num_pos_act = input_layer->bias[i]; 01910 // // LATERAL CONNECTIONS CODE HERE! 01911 // num_neg_act = 0; 01912 // if( input_is_sparse ) 01913 // { 01914 // hidden_activation_pos_i << hidden_act; 01915 // hidden_activation_neg_i << hidden_act; 01916 // if( factorized_connection_rank > 0 ) 01917 // if( input_is_active[i] ) 01918 // { 01919 // input_i = 1; 01920 // productScaleAcc( hidden_activation_neg_i, 01921 // U, V(i), -1.,1.); 01922 // } 01923 // else 01924 // { 01925 // input_i = 0; 01926 // productScaleAcc( hidden_activation_pos_i, 01927 // U, V(i), 1.,1.); 01928 // } 01929 // else 01930 // if( input_is_active[i] ) 01931 // { 01932 // input_i = 1; 01933 // hidden_activation_neg_i -= V(i); 01934 // } 01935 // else 01936 // { 01937 // input_i = 0; 01938 // hidden_activation_pos_i += V(i); 01939 // } 01940 // } 01941 // else 01942 // { 01943 // w = &(connection->weights(0,i)); 01944 // input_i = input[i]; 01945 // for( int j=0; j<hidden_layer->size; j++,w+=m ) 01946 // { 01947 // a_pos_i[j] = a[j] - *w * ( input_i - 1 ); 01948 // a_neg_i[j] = a[j] - *w * input_i; 01949 // } 01950 // } 01951 // num_pos_act -= hidden_layer->freeEnergyContribution( 01952 // hidden_activation_pos_i); 01953 // num_neg_act -= hidden_layer->freeEnergyContribution( 01954 // hidden_activation_neg_i); 01955 // //num_pos = safeexp(num_pos_act); 01956 // //num_neg = safeexp(num_neg_act); 01957 // //input_probs_i = num_pos / (num_pos + num_neg); 01958 // if( input_layer->use_fast_approximations ) 01959 // input_probs_i = fastsigmoid( 01960 // num_pos_act - num_neg_act); 01961 // else 01962 // { 01963 // num_pos = safeexp(num_pos_act); 01964 // num_neg = safeexp(num_neg_act); 01965 // input_probs_i = num_pos / (num_pos + num_neg); 01966 // } 01967 // if( input_layer->use_fast_approximations ) 01968 // pseudolikelihood += tabulated_softplus( 01969 // num_pos_act - num_neg_act ) 01970 // - input_i * (num_pos_act - num_neg_act); 01971 // else 01972 // pseudolikelihood += softplus( 01973 // num_pos_act - num_neg_act ) 01974 // - input_i * (num_pos_act - num_neg_act); 01975 // 01976 // } 01977 // 01978 // estimated_gradient.fill(pseudolikelihood); 01979 // 01980 // for( int i1=0; i1<estimated_gradient.length(); i1++) 01981 // for( int j1=0; j1<estimated_gradient.width(); j1++) 01982 // { 01983 // connection->weights(i1,j1) += epsilon; 01984 // pseudolikelihood = 0; 01985 // 01986 // // Compute activations 01987 // if( input_is_sparse ) 01988 // { 01989 // if( factorized_connection_rank > 0 ) 01990 // { 01991 // Vx.clear(); 01992 // train_set->getExtra(stage%nsamples,extra); 01993 // for( int i=0; i<extra.length(); i++ ) 01994 // { 01995 // Vx += V((int)extra[i]); 01996 // input_is_active[(int)extra[i]] = true; 01997 // } 01998 // 01999 // product(hidden_act,U,Vx); 02000 // } 02001 // else 02002 // { 02003 // hidden_act.clear(); 02004 // train_set->getExtra(stage%nsamples,extra); 02005 // for( int i=0; i<extra.length(); i++ ) 02006 // { 02007 // hidden_act += V((int)extra[i]); 02008 // input_is_active[(int)extra[i]] = true; 02009 // } 02010 // } 02011 // hidden_act += hidden_layer->bias; 02012 // } 02013 // else 02014 // { 02015 // connection->setAsDownInput( input ); 02016 // hidden_layer->getAllActivations( 02017 // (RBMMatrixConnection*) connection ); 02018 // } 02019 // 02020 // if( targetsize() == 1 ) 02021 // productAcc( hidden_layer->activation, 02022 // target_connection->weights, 02023 // target_one_hot ); 02024 // else if( targetsize() > 1 ) 02025 // productAcc( hidden_layer->activation, 02026 // target_connection->weights, 02027 // target ); 02028 // 02029 // for( int l=0; l<input_layer->size ; l++ ) 02030 // { 02031 // if( n_selected_inputs_pseudolikelihood <= inputsize() && 02032 // n_selected_inputs_pseudolikelihood > 0 ) 02033 // { 02034 // if( l >= n_selected_inputs_pseudolikelihood ) 02035 // break; 02036 // i = input_indices[l]; 02037 // } 02038 // else 02039 // i = l; 02040 // 02041 // num_pos_act = input_layer->bias[i]; 02042 // // LATERAL CONNECTIONS CODE HERE! 02043 // num_neg_act = 0; 02044 // if( input_is_sparse ) 02045 // { 02046 // hidden_activation_pos_i << hidden_act; 02047 // hidden_activation_neg_i << hidden_act; 02048 // if( factorized_connection_rank > 0 ) 02049 // if( input_is_active[i] ) 02050 // { 02051 // input_i = 1; 02052 // productScaleAcc( hidden_activation_neg_i, 02053 // U, V(i), -1.,1.); 02054 // } 02055 // else 02056 // { 02057 // input_i = 0; 02058 // productScaleAcc( hidden_activation_pos_i, 02059 // U, V(i), 1.,1.); 02060 // } 02061 // else 02062 // if( input_is_active[i] ) 02063 // { 02064 // input_i = 1; 02065 // hidden_activation_neg_i -= V(i); 02066 // } 02067 // else 02068 // { 02069 // input_i = 0; 02070 // hidden_activation_pos_i += V(i); 02071 // } 02072 // } 02073 // else 02074 // { 02075 // w = &(connection->weights(0,i)); 02076 // input_i = input[i]; 02077 // for( int j=0; j<hidden_layer->size; j++,w+=m ) 02078 // { 02079 // a_pos_i[j] = a[j] - *w * ( input_i - 1 ); 02080 // a_neg_i[j] = a[j] - *w * input_i; 02081 // } 02082 // } 02083 // num_pos_act -= hidden_layer->freeEnergyContribution( 02084 // hidden_activation_pos_i); 02085 // num_neg_act -= hidden_layer->freeEnergyContribution( 02086 // hidden_activation_neg_i); 02087 // //num_pos = safeexp(num_pos_act); 02088 // //num_neg = safeexp(num_neg_act); 02089 // //input_probs_i = num_pos / (num_pos + num_neg); 02090 // if( input_layer->use_fast_approximations ) 02091 // input_probs_i = fastsigmoid( 02092 // num_pos_act - num_neg_act); 02093 // else 02094 // { 02095 // num_pos = safeexp(num_pos_act); 02096 // num_neg = safeexp(num_neg_act); 02097 // input_probs_i = num_pos / (num_pos + num_neg); 02098 // } 02099 // if( input_layer->use_fast_approximations ) 02100 // pseudolikelihood += tabulated_softplus( 02101 // num_pos_act - num_neg_act ) 02102 // - input_i * (num_pos_act - num_neg_act); 02103 // else 02104 // pseudolikelihood += softplus( 02105 // num_pos_act - num_neg_act ) 02106 // - input_i * (num_pos_act - num_neg_act); 02107 // 02108 // } 02109 // connection->weights(i1,j1) -= epsilon; 02110 // estimated_gradient(i1,j1) = (pseudolikelihood - estimated_gradient(i1,j1)) 02111 // / epsilon; 02112 // } 02113 // 02114 // } 02115 //} 02116 02117 // Compute activations 02118 if( input_is_sparse ) 02119 { 02120 if( factorized_connection_rank > 0 ) 02121 { 02122 Vx.clear(); 02123 train_set->getExtra(stage%nsamples,extra); 02124 for( int i=0; i<extra.length(); i++ ) 02125 { 02126 Vx += V((int)extra[i]); 02127 input_is_active[(int)extra[i]] = true; 02128 } 02129 02130 product(hidden_act,U,Vx); 02131 } 02132 else 02133 { 02134 hidden_act.clear(); 02135 train_set->getExtra(stage%nsamples,extra); 02136 for( int i=0; i<extra.length(); i++ ) 02137 { 02138 hidden_act += V((int)extra[i]); 02139 input_is_active[(int)extra[i]] = true; 02140 } 02141 } 02142 hidden_act += hidden_layer->bias; 02143 } 02144 else 02145 { 02146 connection->setAsDownInput( input ); 02147 hidden_layer->getAllActivations( 02148 (RBMMatrixConnection*) connection ); 02149 } 02150 02151 if( targetsize() == 1 ) 02152 productAcc( hidden_layer->activation, 02153 target_connection->weights, 02154 target_one_hot ); 02155 else if( targetsize() > 1 ) 02156 productAcc( hidden_layer->activation, 02157 target_connection->weights, 02158 target ); 02159 02160 // Clear gradients 02161 hidden_activation_gradient.clear(); 02162 if( !input_is_sparse ) 02163 { 02164 connection_gradient.clear(); 02165 input_gradient.clear(); // If input is sparse, only the 02166 // appropriage elements of this 02167 // gradient will be used 02168 } 02169 02170 if( factorized_connection_rank > 0 ) 02171 { 02172 U_gradient.clear(); 02173 Vx_gradient.clear(); 02174 } 02175 V_gradients.clear(); 02176 02177 int i=0; 02178 pseudolikelihood = 0; 02179 for( int l=0; l<input_layer->size ; l++ ) 02180 { 02181 if( n_selected_inputs_pseudolikelihood <= inputsize() && 02182 n_selected_inputs_pseudolikelihood > 0 ) 02183 { 02184 if( l >= n_selected_inputs_pseudolikelihood ) 02185 break; 02186 i = input_indices[l]; 02187 } 02188 else 02189 i = l; 02190 02191 num_pos_act = input_layer->bias[i]; 02192 // LATERAL CONNECTIONS CODE HERE! 02193 num_neg_act = 0; 02194 if( input_is_sparse ) 02195 { 02196 hidden_activation_pos_i << hidden_act; 02197 hidden_activation_neg_i << hidden_act; 02198 if( factorized_connection_rank > 0 ) 02199 if( input_is_active[i] ) 02200 { 02201 input_i = 1; 02202 productScaleAcc( hidden_activation_neg_i, 02203 U, V(i), -1.,1.); 02204 } 02205 else 02206 { 02207 input_i = 0; 02208 productScaleAcc( hidden_activation_pos_i, 02209 U, V(i), 1.,1.); 02210 } 02211 else 02212 if( input_is_active[i] ) 02213 { 02214 input_i = 1; 02215 hidden_activation_neg_i -= V(i); 02216 } 02217 else 02218 { 02219 input_i = 0; 02220 hidden_activation_pos_i += V(i); 02221 } 02222 } 02223 else 02224 { 02225 w = &(connection->weights(0,i)); 02226 input_i = input[i]; 02227 for( int j=0; j<hidden_layer->size; j++,w+=m ) 02228 { 02229 a_pos_i[j] = a[j] - *w * ( input_i - 1 ); 02230 a_neg_i[j] = a[j] - *w * input_i; 02231 } 02232 } 02233 num_pos_act -= hidden_layer->freeEnergyContribution( 02234 hidden_activation_pos_i); 02235 num_neg_act -= hidden_layer->freeEnergyContribution( 02236 hidden_activation_neg_i); 02237 //num_pos = safeexp(num_pos_act); 02238 //num_neg = safeexp(num_neg_act); 02239 //input_probs_i = num_pos / (num_pos + num_neg); 02240 if( input_layer->use_fast_approximations ) 02241 input_probs_i = fastsigmoid( 02242 num_pos_act - num_neg_act); 02243 else 02244 { 02245 num_pos = safeexp(num_pos_act); 02246 num_neg = safeexp(num_neg_act); 02247 input_probs_i = num_pos / (num_pos + num_neg); 02248 } 02249 02250 // Compute input_prob gradient 02251 if( input_layer->use_fast_approximations ) 02252 pseudolikelihood += tabulated_softplus( 02253 num_pos_act - num_neg_act ) 02254 - input_i * (num_pos_act - num_neg_act); 02255 else 02256 pseudolikelihood += softplus( 02257 num_pos_act - num_neg_act ) 02258 - input_i * (num_pos_act - num_neg_act); 02259 input_gradient[i] = input_probs_i - input_i; 02260 02261 hidden_layer->freeEnergyContributionGradient( 02262 hidden_activation_pos_i, 02263 hidden_activation_pos_i_gradient, 02264 -input_gradient[i], 02265 false); 02266 hidden_activation_gradient += hidden_activation_pos_i_gradient; 02267 02268 hidden_layer->freeEnergyContributionGradient( 02269 hidden_activation_neg_i, 02270 hidden_activation_neg_i_gradient, 02271 input_gradient[i], 02272 false); 02273 hidden_activation_gradient += hidden_activation_neg_i_gradient; 02274 02275 if( input_is_sparse ) 02276 { 02277 if( factorized_connection_rank > 0 ) 02278 { 02279 if( input_is_active[i] ) 02280 { 02281 Vec vg = V_gradients(l); 02282 transposeProductScaleAcc( 02283 vg, U, hidden_activation_neg_i_gradient, 02284 -1., 0); 02285 externalProductScaleAcc( 02286 U_gradient, 02287 hidden_activation_neg_i_gradient, 02288 V(i), -1 ); 02289 } 02290 else 02291 { 02292 Vec vg = V_gradients(l); 02293 transposeProduct( 02294 vg, U, hidden_activation_pos_i_gradient); 02295 externalProductAcc( 02296 U_gradient, 02297 hidden_activation_pos_i_gradient, 02298 V(i) ); 02299 } 02300 } 02301 else 02302 { 02303 if( input_is_active[i] ) 02304 V_gradients(l) -= 02305 hidden_activation_neg_i_gradient; 02306 else 02307 V_gradients(l) += 02308 hidden_activation_pos_i_gradient; 02309 } 02310 } 02311 else 02312 { 02313 gw = &(connection_gradient(0,i)); 02314 for( int j=0; j<hidden_layer->size; j++,gw+=m ) 02315 { 02316 *gw -= ga_pos_i[j] * ( input_i - 1 ); 02317 *gw -= ga_neg_i[j] * input_i; 02318 } 02319 } 02320 } 02321 02322 // Hidden bias update 02323 multiplyScaledAdd(hidden_activation_gradient, 1.0, -lr, 02324 hidden_layer->bias); 02325 02326 if( input_is_sparse ) 02327 { 02328 //Mat true_gradient(V.length(), V.width()); 02329 if( factorized_connection_rank > 0 ) 02330 { 02331 // Factorized connection U update 02332 externalProductAcc( U_gradient, 02333 hidden_activation_gradient, 02334 Vx ); 02335 multiplyScaledAdd( U_gradient, 1.0, -lr, U ); 02336 02337 //real U_cos_ang = dot(U_gradient.toVec(),U_estimated_gradient.toVec()) 02338 // / (norm(U_gradient.toVec()) *norm(U_estimated_gradient.toVec())); 02339 //cout << "U_cos_ang=" << U_cos_ang << endl; 02340 //cout << "U_ang=" << acos(U_cos_ang) << endl; 02341 02342 02343 // Factorized connection V update 02344 transposeProduct( Vx_gradient, U, 02345 hidden_activation_gradient ); 02346 for( int e=0; e<extra.length(); e++ ) 02347 { 02348 V((int)extra[e]) -= lr * Vx_gradient; 02349 input_is_active[(int)extra[e]] = false; 02350 //true_gradient((int)extra[e]) += Vx_gradient; 02351 } 02352 } 02353 else 02354 { 02355 // Update input connection V 02356 for( int e=0; e<extra.length(); e++ ) 02357 { 02358 V((int)extra[e]) -= lr * hidden_activation_gradient; 02359 input_is_active[(int)extra[e]] = false; 02360 //true_gradient((int)extra[e]) += hidden_activation_gradient; 02361 } 02362 } 02363 02364 for( int l=0; l<input_layer->size ; l++ ) 02365 { 02366 if( n_selected_inputs_pseudolikelihood <= inputsize() 02367 && n_selected_inputs_pseudolikelihood > 0 ) 02368 { 02369 if( l >= n_selected_inputs_pseudolikelihood ) 02370 break; 02371 i = input_indices[l]; 02372 } 02373 else 02374 i = l; 02375 // Extra V gradients 02376 V(i) -= lr * V_gradients(l); 02377 //true_gradient(i) += V_gradients(l); 02378 02379 // Input update 02380 input_layer->bias[i] -= lr * input_gradient[i]; 02381 } 02382 02383 //real cos_ang = dot(true_gradient.toVec(),estimated_gradient.toVec()) 02384 // / (norm(true_gradient.toVec()) *norm(estimated_gradient.toVec())); 02385 //cout << "cos_ang=" << cos_ang << endl; 02386 //cout << "ang=" << acos(cos_ang) << endl; 02387 02388 } 02389 else 02390 { 02391 externalProductAcc( connection_gradient, hidden_activation_gradient, 02392 input ); 02393 02394 //real cos_ang = dot(connection_gradient.toVec(),estimated_gradient.toVec()) 02395 // / (norm(connection_gradient.toVec()) *norm(estimated_gradient.toVec())); 02396 //cout << "cos_ang=" << cos_ang << endl; 02397 //cout << "ang=" << acos(cos_ang) << endl; 02398 02399 // Connection weights update 02400 multiplyScaledAdd( connection_gradient, 1.0, -lr, 02401 connection->weights ); 02402 // Input bias update 02403 multiplyScaledAdd(input_gradient, 1.0, -lr, 02404 input_layer->bias); 02405 } 02406 02407 02408 if( targetsize() == 1 ) 02409 externalProductScaleAcc( target_connection->weights, 02410 hidden_activation_gradient, 02411 target_one_hot, 02412 -lr ); 02413 if( targetsize() > 1 ) 02414 externalProductScaleAcc( target_connection->weights, 02415 hidden_activation_gradient, 02416 target, 02417 -lr ); 02418 02419 // N.B.: train costs contains pseudolikelihood 02420 // or pseudoNLL, not NLL 02421 if( compute_input_space_nll && targetsize() == 0 ) 02422 train_costs[nll_cost_index] = pseudolikelihood; 02423 //mean_pseudolikelihood += pseudolikelihood; 02424 // cout << "input_gradient: " << input_gradient << endl; 02425 // cout << "hidden_activation_gradient" << hidden_activation_gradient << endl; 02426 02427 } 02428 else 02429 { 02430 if( input_is_sparse ) 02431 PLERROR("In PseudolikelihoodRBM::train(): " 02432 "pseudolikelihood_context_size with > 0 " 02433 "not implemented for sparse inputs"); 02434 02435 if( ( pseudolikelihood_context_type == "most_correlated" || 02436 pseudolikelihood_context_type == "most_correlated_uniform_random" ) 02437 && correlations_per_i.length() == 0 ) 02438 { 02439 Vec corr_input(inputsize()); 02440 Vec corr_target(targetsize()); 02441 real corr_weight; 02442 Vec mean(inputsize()); 02443 mean.clear(); 02444 for(int t=0; t<train_set->length(); t++) 02445 { 02446 train_set->getExample(t,corr_input,corr_target, 02447 corr_weight); 02448 mean += corr_input; 02449 } 02450 mean /= train_set->length(); 02451 02452 correlations_per_i.resize(inputsize(),inputsize()); 02453 correlations_per_i.clear(); 02454 Mat cov(inputsize(), inputsize()); 02455 cov.clear(); 02456 for(int t=0; t<train_set->length(); t++) 02457 { 02458 train_set->getExample(t,corr_input,corr_target, 02459 corr_weight); 02460 corr_input -= mean; 02461 externalProductAcc(cov, 02462 corr_input,corr_input); 02463 } 02464 //correlations_per_i /= train_set->length(); 02465 02466 for( int i=0; i<inputsize(); i++ ) 02467 for( int j=0; j<inputsize(); j++) 02468 { 02469 correlations_per_i(i,j) = 02470 abs(cov(i,j)) 02471 / sqrt(cov(i,i)*cov(j,j)); 02472 } 02473 02474 if( pseudolikelihood_context_type == "most_correlated") 02475 { 02476 if( pseudolikelihood_context_size <= 0 ) 02477 PLERROR("In PseudolikelihoodRBM::train(): " 02478 "pseudolikelihood_context_size should be > 0 " 02479 "for \"most_correlated\" context type"); 02480 real current_min; 02481 int current_min_position; 02482 real* corr; 02483 int* context; 02484 Vec context_corr(pseudolikelihood_context_size); 02485 context_indices_per_i.resize( 02486 inputsize(), 02487 pseudolikelihood_context_size); 02488 02489 // HUGO: this is quite inefficient for big 02490 // pseudolikelihood_context_sizes, should use a heap 02491 for( int i=0; i<inputsize(); i++ ) 02492 { 02493 current_min = REAL_MAX; 02494 current_min_position = -1; 02495 corr = correlations_per_i[i]; 02496 context = context_indices_per_i[i]; 02497 for( int j=0; j<inputsize(); j++ ) 02498 { 02499 if( i == j ) 02500 continue; 02501 02502 // Filling first pseudolikelihood_context_size elements 02503 if( j - (j>i?1:0) < pseudolikelihood_context_size ) 02504 { 02505 context[j - (j>i?1:0)] = j; 02506 context_corr[j - (j>i?1:0)] = corr[j]; 02507 if( current_min > corr[j] ) 02508 { 02509 current_min = corr[j]; 02510 current_min_position = j - (j>i?1:0); 02511 } 02512 continue; 02513 } 02514 02515 if( corr[j] > current_min ) 02516 { 02517 context[current_min_position] = j; 02518 context_corr[current_min_position] = corr[j]; 02519 current_min = 02520 min( context_corr, 02521 current_min_position ); 02522 } 02523 } 02524 } 02525 } 02526 02527 if( pseudolikelihood_context_type == 02528 "most_correlated_uniform_random" ) 02529 { 02530 if( k_most_correlated < 02531 pseudolikelihood_context_size ) 02532 PLERROR("In PseudolikelihoodRBM::train(): " 02533 "k_most_correlated should be " 02534 ">= pseudolikelihood_context_size"); 02535 02536 if( k_most_correlated > inputsize() - 1 ) 02537 PLERROR("In PseudolikelihoodRBM::train(): " 02538 "k_most_correlated should be " 02539 "< inputsize()"); 02540 02541 real current_min; 02542 int current_min_position; 02543 real* corr; 02544 int* context; 02545 Vec context_corr( k_most_correlated ); 02546 context_most_correlated.resize( inputsize() ); 02547 02548 // HUGO: this is quite inefficient for big 02549 // pseudolikelihood_context_sizes, should use a heap 02550 for( int i=0; i<inputsize(); i++ ) 02551 { 02552 context_most_correlated[i].resize( 02553 k_most_correlated ); 02554 current_min = REAL_MAX; 02555 current_min_position = -1; 02556 corr = correlations_per_i[i]; 02557 context = context_most_correlated[i].data(); 02558 for( int j=0; j<inputsize(); j++ ) 02559 { 02560 if( i == j ) 02561 continue; 02562 02563 // Filling first k_most_correlated elements 02564 if( j - (j>i?1:0) < k_most_correlated ) 02565 { 02566 context[j - (j>i?1:0)] = j; 02567 context_corr[j - (j>i?1:0)] = corr[j]; 02568 if( current_min > corr[j] ) 02569 { 02570 current_min = corr[j]; 02571 current_min_position = j - (j>i?1:0); 02572 } 02573 continue; 02574 } 02575 02576 if( corr[j] > current_min ) 02577 { 02578 context[current_min_position] = j; 02579 context_corr[current_min_position] = corr[j]; 02580 current_min = 02581 min( context_corr, 02582 current_min_position ); 02583 } 02584 } 02585 } 02586 } 02587 } 02588 02589 if( pseudolikelihood_context_type == "uniform_random" || 02590 pseudolikelihood_context_type == "most_correlated_uniform_random" ) 02591 { 02592 // Generate contexts 02593 if( pseudolikelihood_context_type == "uniform_random" ) 02594 for( int i=0; i<context_indices.length(); i++) 02595 context_indices[i] = i; 02596 int tmp,k; 02597 int* c; 02598 int n; 02599 if( pseudolikelihood_context_type == "uniform_random" ) 02600 { 02601 c = context_indices.data(); 02602 n = input_layer->size-1; 02603 } 02604 int* ci; 02605 for( int i=0; i<context_indices_per_i.length(); i++) 02606 { 02607 if( pseudolikelihood_context_type == 02608 "most_correlated_uniform_random" ) 02609 { 02610 c = context_most_correlated[i].data(); 02611 n = context_most_correlated[i].length(); 02612 } 02613 02614 ci = context_indices_per_i[i]; 02615 for (int j = 0; j < context_indices_per_i.width(); j++) 02616 { 02617 k = j + 02618 random_gen->uniform_multinomial_sample(n - j); 02619 02620 tmp = c[j]; 02621 c[j] = c[k]; 02622 c[k] = tmp; 02623 02624 if( pseudolikelihood_context_type 02625 == "uniform_random" ) 02626 { 02627 if( c[j] >= i ) 02628 ci[j] = c[j]+1; 02629 else 02630 ci[j] = c[j]; 02631 } 02632 02633 if( pseudolikelihood_context_type == 02634 "most_correlated_uniform_random" ) 02635 ci[j] = c[j]; 02636 } 02637 } 02638 } 02639 02640 connection->setAsDownInput( input ); 02641 hidden_layer->getAllActivations( 02642 (RBMMatrixConnection*) connection ); 02643 02644 if( targetsize() == 1 ) 02645 productAcc( hidden_layer->activation, 02646 target_connection->weights, 02647 target_one_hot ); 02648 else if( targetsize() > 1 ) 02649 productAcc( hidden_layer->activation, 02650 target_connection->weights, 02651 target ); 02652 02653 int n_conf = ipow(2, pseudolikelihood_context_size); 02654 //nums_act.resize( 2 * n_conf ); 02655 //gnums_act.resize( 2 * n_conf ); 02656 //context_probs.resize( 2 * n_conf ); 02657 //hidden_activations_context.resize( 2*n_conf, hidden_layer->size ); 02658 //hidden_activations_context_k_gradient.resize( hidden_layer->size ); 02659 real* nums_data; 02660 real* gnums_data; 02661 real* cp_data; 02662 real* a = hidden_layer->activation.data(); 02663 real* w, *gw, *gi, *ac, *bi, *gac; 02664 int* context_i; 02665 int m; 02666 int conf_index; 02667 real input_i, input_j, log_Zi; 02668 real pseudolikelihood = 0; 02669 02670 input_gradient.clear(); 02671 hidden_activation_gradient.clear(); 02672 connection_gradient.clear(); 02673 gi = input_gradient.data(); 02674 bi = input_layer->bias.data(); 02675 for( int i=0; i<input_layer->size ; i++ ) 02676 { 02677 nums_data = nums_act.data(); 02678 cp_data = context_probs.data(); 02679 input_i = input[i]; 02680 02681 if( connection ) 02682 m = connection->weights.mod(); 02683 // input_i = 1 02684 for( int k=0; k<n_conf; k++) 02685 { 02686 *nums_data = bi[i]; 02687 *cp_data = input_i; 02688 conf_index = k; 02689 ac = hidden_activations_context[k]; 02690 02691 w = &(connection->weights(0,i)); 02692 for( int j=0; j<hidden_layer->size; j++,w+=m ) 02693 ac[j] = a[j] - *w * ( input_i - 1 ); 02694 02695 context_i = context_indices_per_i[i]; 02696 for( int l=0; l<pseudolikelihood_context_size; l++ ) 02697 { 02698 input_j = input[*context_i]; 02699 w = &(connection->weights(0,*context_i)); 02700 if( conf_index & 1) 02701 { 02702 *cp_data *= input_j; 02703 *nums_data += bi[*context_i]; 02704 for( int j=0; j<hidden_layer->size; j++,w+=m ) 02705 ac[j] -= *w * ( input_j - 1 ); 02706 } 02707 else 02708 { 02709 *cp_data *= (1-input_j); 02710 for( int j=0; j<hidden_layer->size; j++,w+=m ) 02711 ac[j] -= *w * input_j; 02712 } 02713 02714 conf_index >>= 1; 02715 context_i++; 02716 } 02717 *nums_data -= hidden_layer->freeEnergyContribution( 02718 hidden_activations_context(k)); 02719 nums_data++; 02720 cp_data++; 02721 } 02722 02723 // input_i = 0 02724 for( int k=0; k<n_conf; k++) 02725 { 02726 *nums_data = 0; 02727 *cp_data = (1-input_i); 02728 conf_index = k; 02729 ac = hidden_activations_context[n_conf + k]; 02730 02731 w = &(connection->weights(0,i)); 02732 for( int j=0; j<hidden_layer->size; j++,w+=m ) 02733 ac[j] = a[j] - *w * input_i; 02734 02735 context_i = context_indices_per_i[i]; 02736 for( int l=0; l<pseudolikelihood_context_size; l++ ) 02737 { 02738 w = &(connection->weights(0,*context_i)); 02739 input_j = input[*context_i]; 02740 if( conf_index & 1) 02741 { 02742 *cp_data *= input_j; 02743 *nums_data += bi[*context_i]; 02744 for( int j=0; j<hidden_layer->size; j++,w+=m ) 02745 ac[j] -= *w * ( input_j - 1 ); 02746 } 02747 else 02748 { 02749 *cp_data *= (1-input_j); 02750 for( int j=0; j<hidden_layer->size; j++,w+=m ) 02751 ac[j] -= *w * input_j; 02752 } 02753 02754 conf_index >>= 1; 02755 context_i++; 02756 } 02757 *nums_data -= hidden_layer->freeEnergyContribution( 02758 hidden_activations_context(n_conf + k)); 02759 nums_data++; 02760 cp_data++; 02761 } 02762 02763 02764 // Gradient computation 02765 //exp( nums_act, nums); 02766 //Zi = sum(nums); 02767 //log_Zi = pl_log(Zi); 02768 log_Zi = logadd(nums_act); 02769 02770 nums_data = nums_act.data(); 02771 gnums_data = gnums_act.data(); 02772 cp_data = context_probs.data(); 02773 02774 // Compute input_prob gradient 02775 02776 m = connection_gradient.mod(); 02777 // input_i = 1 02778 for( int k=0; k<n_conf; k++) 02779 { 02780 pseudolikelihood -= *cp_data * (*nums_data - log_Zi); 02781 *gnums_data = (safeexp(*nums_data - log_Zi) - *cp_data); 02782 gi[i] += *gnums_data; 02783 02784 hidden_layer->freeEnergyContributionGradient( 02785 hidden_activations_context(k), 02786 hidden_activations_context_k_gradient, 02787 -*gnums_data, 02788 false); 02789 hidden_activation_gradient += 02790 hidden_activations_context_k_gradient; 02791 02792 gac = hidden_activations_context_k_gradient.data(); 02793 gw = &(connection_gradient(0,i)); 02794 for( int j=0; j<hidden_layer->size; j++,gw+=m ) 02795 *gw -= gac[j] * ( input_i - 1 ); 02796 02797 context_i = context_indices_per_i[i]; 02798 for( int l=0; l<pseudolikelihood_context_size; l++ ) 02799 { 02800 gw = &(connection_gradient(0,*context_i)); 02801 input_j = input[*context_i]; 02802 if( conf_index & 1) 02803 { 02804 gi[*context_i] += *gnums_data; 02805 for( int j=0; j<hidden_layer->size; j++,gw+=m ) 02806 *gw -= gac[j] * ( input_j - 1 ); 02807 } 02808 else 02809 { 02810 for( int j=0; j<hidden_layer->size; j++,gw+=m ) 02811 *gw -= gac[j] * input_j; 02812 } 02813 conf_index >>= 1; 02814 context_i++; 02815 } 02816 02817 nums_data++; 02818 gnums_data++; 02819 cp_data++; 02820 } 02821 02822 // input_i = 0 02823 for( int k=0; k<n_conf; k++) 02824 { 02825 pseudolikelihood -= *cp_data * (*nums_data - log_Zi); 02826 *gnums_data = (safeexp(*nums_data - log_Zi) - *cp_data); 02827 02828 hidden_layer->freeEnergyContributionGradient( 02829 hidden_activations_context(n_conf + k), 02830 hidden_activations_context_k_gradient, 02831 -*gnums_data, 02832 false); 02833 hidden_activation_gradient += 02834 hidden_activations_context_k_gradient; 02835 02836 gac = hidden_activations_context_k_gradient.data(); 02837 gw = &(connection_gradient(0,i)); 02838 for( int j=0; j<hidden_layer->size; j++,gw+=m ) 02839 *gw -= gac[j] *input_i; 02840 02841 context_i = context_indices_per_i[i]; 02842 for( int l=0; l<pseudolikelihood_context_size; l++ ) 02843 { 02844 gw = &(connection_gradient(0,*context_i)); 02845 input_j = input[*context_i]; 02846 if( conf_index & 1) 02847 { 02848 gi[*context_i] += *gnums_data; 02849 for( int j=0; j<hidden_layer->size; j++,gw+=m ) 02850 *gw -= gac[j] * ( input_j - 1 ); 02851 } 02852 else 02853 { 02854 for( int j=0; j<hidden_layer->size; j++,gw+=m ) 02855 *gw -= gac[j] * input_j; 02856 } 02857 02858 conf_index >>= 1; 02859 context_i++; 02860 } 02861 02862 nums_data++; 02863 gnums_data++; 02864 cp_data++; 02865 } 02866 } 02867 02868 // cout << "input_gradient: " << input_gradient << endl; 02869 // cout << "hidden_activation_gradient" << hidden_activation_gradient << endl; 02870 02871 externalProductAcc( connection_gradient, hidden_activation_gradient, 02872 input ); 02873 02874 // Hidden bias update 02875 multiplyScaledAdd(hidden_activation_gradient, 1.0, -lr, 02876 hidden_layer->bias); 02877 // Connection weights update 02878 multiplyScaledAdd( connection_gradient, 1.0, -lr, 02879 connection->weights ); 02880 // Input bias update 02881 multiplyScaledAdd(input_gradient, 1.0, -lr, 02882 input_layer->bias); 02883 02884 if( targetsize() == 1 ) 02885 externalProductScaleAcc( target_connection->weights, 02886 hidden_activation_gradient, 02887 target_one_hot, 02888 -lr ); 02889 if( targetsize() > 1 ) 02890 externalProductScaleAcc( target_connection->weights, 02891 hidden_activation_gradient, 02892 target, 02893 -lr ); 02894 02895 // N.B.: train costs contains pseudolikelihood 02896 // or pseudoNLL, not NLL 02897 if( compute_input_space_nll && targetsize() == 0 ) 02898 train_costs[nll_cost_index] = pseudolikelihood; 02899 } 02900 } 02901 02902 // CD learning 02903 if( !fast_exact_is_equal(cd_learning_rate, 0.) && 02904 (targetsize() == 0 || generative_learning_weight > 0) ) 02905 { 02906 if( input_is_sparse ) 02907 { 02908 if( is_missing(target[0]) ) 02909 PLERROR("In PseudolikelihoodRBM::train(): generative training with " 02910 "unlabeled examples not supported for CD training with " 02911 "sparse inputs."); 02912 02913 // Randomly select inputs 02914 if( n_selected_inputs_cd > inputsize() || 02915 n_selected_inputs_cd <= 0 ) 02916 PLERROR("In PseudolikelihoodRBM::train(): " 02917 "n_selected_inputs_cd should be > 0 and " 02918 "<= inputsize()" ); 02919 02920 if ( input_indices.length() == 0 ) 02921 { 02922 input_indices.resize(inputsize()); 02923 for( int i=0; i<input_indices.length(); i++ ) 02924 input_indices[i] = i; 02925 02926 } 02927 02928 // Randomly selected inputs 02929 int tmp; 02930 int k; 02931 for (int j = 0; j < n_selected_inputs_cd; j++) 02932 { 02933 k = j + 02934 random_gen->uniform_multinomial_sample( 02935 inputsize() - j); 02936 02937 tmp = input_indices[j]; 02938 input_indices[j] = input_indices[k]; 02939 input_indices[k] = tmp; 02940 } 02941 02942 if( factorized_connection_rank > 0 ) 02943 PLERROR("In PseudolikelihoodRBM::train(): factorized " 02944 "connection is not implemented for CD and " 02945 "sparse inputs" ); 02946 02947 if( !fast_exact_is_equal(persistent_cd_weight, 0) ) 02948 PLERROR("In PseudolikelihoodRBM::train(): persistent CD " 02949 "cannot be used for sparse inputs" ); 02950 02951 if( use_mean_field_cd ) 02952 PLERROR("In PseudolikelihoodRBM::train(): MF-CD " 02953 "is not implemented for sparse inputs" ); 02954 02955 if( !fast_exact_is_equal(cd_decrease_ct, 0) ) 02956 lr = cd_learning_rate / (1.0 + stage * cd_decrease_ct ); 02957 else 02958 lr = cd_learning_rate; 02959 02960 if( targetsize() > 0 ) 02961 lr *= generative_learning_weight; 02962 02963 if( weightsize > 0 ) 02964 lr *= weight; 02965 02966 setLearningRate(lr); 02967 02968 // Positive phase 02969 if( targetsize() > 0 ) 02970 pos_target = target_one_hot; 02971 02972 Vec hidden_act = hidden_layer->activation; 02973 hidden_act.clear(); 02974 hidden_act_non_selected.clear(); 02975 train_set->getExtra(stage%nsamples,extra); 02976 input_is_selected.resize( extra.length() ); 02977 input_is_selected.clear(); 02978 for( int i=0; i<extra.length(); i++ ) 02979 { 02980 hidden_act += V((int)extra[i]); 02981 if( input_indices.subVec(0,n_selected_inputs_cd).find((int)extra[i]) >= 0 ) 02982 { 02983 input_is_selected[i] = true; 02984 pos_input_sparse[(int)extra[i]] = 1; 02985 } 02986 else 02987 hidden_act_non_selected += V((int)extra[i]); 02988 } 02989 hidden_act += hidden_layer->bias; 02990 hidden_act_non_selected += hidden_layer->bias; 02991 02992 if( targetsize() == 1 ) 02993 productAcc( hidden_layer->activation, 02994 target_connection->weights, 02995 target_one_hot ); 02996 else if( targetsize() > 1 ) 02997 productAcc( hidden_layer->activation, 02998 target_connection->weights, 02999 target ); 03000 03001 hidden_layer->expectation_is_not_up_to_date(); 03002 hidden_layer->computeExpectation(); 03003 //pos_hidden.resize( hidden_layer->size ); 03004 pos_hidden << hidden_layer->expectation; 03005 03006 // Negative phase 03007 real *w; 03008 Vec input_act = input_layer->activation; 03009 Vec input_sample = input_layer->sample; 03010 Vec hidden_sample = hidden_layer->sample; 03011 int in; 03012 for(int i=0; i<cd_n_gibbs; i++) 03013 { 03014 // Down pass 03015 hidden_layer->generateSample(); 03016 for (int j = 0; j < n_selected_inputs_cd; j++) 03017 { 03018 in = input_indices[j]; 03019 w = V[in]; 03020 input_act[in] = input_layer->bias[in]; 03021 for( int k=0; k<hidden_layer->size; k++ ) 03022 input_act[in] += w[k] * hidden_sample[k]; 03023 03024 if( input_layer->use_fast_approximations ) 03025 { 03026 input_sample[in] = random_gen->binomial_sample( 03027 fastsigmoid( input_act[in] )); 03028 } 03029 else 03030 { 03031 input_sample[in] = random_gen->binomial_sample( 03032 fastsigmoid( input_act[in] )); 03033 } 03034 } 03035 03036 // Up pass 03037 hidden_act << hidden_act_non_selected; 03038 for (int j = 0; j < n_selected_inputs_cd; j++) 03039 { 03040 in = input_indices[j]; 03041 if( fast_exact_is_equal(input_sample[in], 1) ) 03042 hidden_act += V(in); 03043 } 03044 03045 if( targetsize() > 0 ) 03046 { 03047 // Down-up pass for target 03048 target_connection->setAsUpInput( 03049 hidden_layer->sample ); 03050 target_layer->getAllActivations( 03051 (RBMMatrixConnection*) target_connection ); 03052 target_layer->computeExpectation(); 03053 target_layer->generateSample(); 03054 productAcc( hidden_act, 03055 target_connection->weights, 03056 target_layer->sample ); 03057 } 03058 03059 hidden_layer->expectation_is_not_up_to_date(); 03060 hidden_layer->computeExpectation(); 03061 } 03062 03063 neg_hidden = hidden_layer->expectation; 03064 03065 hidden_layer->update(pos_hidden,neg_hidden); 03066 if( targetsize() > 0 ) 03067 { 03068 neg_target = target_layer->sample; 03069 target_layer->update(pos_target,neg_target); 03070 target_connection->update(pos_target,pos_hidden, 03071 neg_target,neg_hidden); 03072 } 03073 03074 // Selected inputs connection update 03075 for (int j = 0; j < n_selected_inputs_cd; j++) 03076 { 03077 in = input_indices[j]; 03078 w = V[in]; 03079 for( int k=0; k<hidden_layer->size; k++ ) 03080 w[k] += lr * (pos_hidden[k] * pos_input_sparse[in] - 03081 neg_hidden[k] * input_sample[in]); 03082 input_layer->bias[in] += lr * ( pos_input_sparse[in] - 03083 input_sample[in]); 03084 } 03085 03086 // Non-selected inputs connection update 03087 hidden_activation_gradient << neg_hidden; 03088 hidden_activation_gradient -= pos_hidden; 03089 hidden_activation_gradient *= -lr; 03090 for( int i=0; i<extra.length(); i++ ) 03091 { 03092 if( input_is_selected[i] == true ) 03093 pos_input_sparse[(int)extra[i]] = 0; 03094 else 03095 V((int)extra[i]) += hidden_activation_gradient; 03096 } 03097 } 03098 else 03099 { 03100 if( !fast_exact_is_equal(persistent_cd_weight, 1.) ) 03101 { 03102 if( !fast_exact_is_equal(cd_decrease_ct, 0) ) 03103 lr = cd_learning_rate / (1.0 + stage * cd_decrease_ct ); 03104 else 03105 lr = cd_learning_rate; 03106 03107 if( targetsize() > 0 ) 03108 lr *= generative_learning_weight; 03109 03110 lr *= (1-persistent_cd_weight); 03111 03112 if( weightsize > 0 ) 03113 lr *= weight; 03114 03115 setLearningRate(lr); 03116 03117 // Positive phase 03118 pos_input = input; 03119 if( targetsize() > 0) 03120 { 03121 if( is_missing(target[0]) ) 03122 { 03123 // Sample from p(y|x) 03124 lr *= semi_sup_learning_weight/generative_learning_weight; 03125 // Get output probabilities 03126 connection->setAsDownInput( input ); 03127 hidden_layer->getAllActivations( 03128 (RBMMatrixConnection*) connection ); 03129 03130 Vec target_act = target_layer->activation; 03131 Vec hidden_act = hidden_layer->activation; 03132 for( int i=0 ; i<target_layer->size ; i++ ) 03133 { 03134 target_act[i] = target_layer->bias[i]; 03135 // LATERAL CONNECTIONS CODE HERE!! 03136 real *w = &(target_connection->weights(0,i)); 03137 // step from one row to the next in weights matrix 03138 int m = target_connection->weights.mod(); 03139 03140 for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 03141 { 03142 // *w = weights(j,i) 03143 hidden_activation_pos_i[j] = hidden_act[j] + *w; 03144 } 03145 target_act[i] -= hidden_layer->freeEnergyContribution( 03146 hidden_activation_pos_i); 03147 } 03148 03149 target_layer->expectation_is_up_to_date = false; 03150 target_layer->computeExpectation(); 03151 target_layer->generateSample(); 03152 target_one_hot << target_layer->sample; 03153 } 03154 pos_target = target_one_hot; 03155 } 03156 connection->setAsDownInput( input ); 03157 hidden_layer->getAllActivations( 03158 (RBMMatrixConnection*) connection ); 03159 if( targetsize() == 1 ) 03160 productAcc( hidden_layer->activation, 03161 target_connection->weights, 03162 target_one_hot ); 03163 else if( targetsize() > 1 ) 03164 productAcc( hidden_layer->activation, 03165 target_connection->weights, 03166 target ); 03167 03168 hidden_layer->computeExpectation(); 03169 //pos_hidden.resize( hidden_layer->size ); 03170 pos_hidden << hidden_layer->expectation; 03171 03172 // Negative phase 03173 for(int i=0; i<cd_n_gibbs; i++) 03174 { 03175 if( use_mean_field_cd ) 03176 { 03177 connection->setAsUpInput( hidden_layer->expectation ); 03178 } 03179 else 03180 { 03181 hidden_layer->generateSample(); 03182 connection->setAsUpInput( hidden_layer->sample ); 03183 } 03184 input_layer->getAllActivations( 03185 (RBMMatrixConnection*) connection ); 03186 input_layer->computeExpectation(); 03187 // LATERAL CONNECTIONS CODE HERE! 03188 03189 if( use_mean_field_cd ) 03190 { 03191 connection->setAsDownInput( input_layer->expectation ); 03192 } 03193 else 03194 { 03195 input_layer->generateSample(); 03196 connection->setAsDownInput( input_layer->sample ); 03197 } 03198 03199 hidden_layer->getAllActivations( 03200 (RBMMatrixConnection*) connection ); 03201 03202 if( targetsize() > 0 ) 03203 { 03204 if( use_mean_field_cd ) 03205 target_connection->setAsUpInput( 03206 hidden_layer->expectation ); 03207 else 03208 target_connection->setAsUpInput( 03209 hidden_layer->sample ); 03210 target_layer->getAllActivations( 03211 (RBMMatrixConnection*) target_connection ); 03212 target_layer->computeExpectation(); 03213 if( use_mean_field_cd ) 03214 productAcc( hidden_layer->activation, 03215 target_connection->weights, 03216 target_layer->expectation ); 03217 else 03218 { 03219 target_layer->generateSample(); 03220 productAcc( hidden_layer->activation, 03221 target_connection->weights, 03222 target_layer->sample ); 03223 } 03224 } 03225 03226 hidden_layer->computeExpectation(); 03227 } 03228 03229 if( use_mean_field_cd ) 03230 neg_input = input_layer->expectation; 03231 else 03232 neg_input = input_layer->sample; 03233 03234 neg_hidden = hidden_layer->expectation; 03235 03236 input_layer->update(pos_input,neg_input); 03237 hidden_layer->update(pos_hidden,neg_hidden); 03238 connection->update(pos_input,pos_hidden, 03239 neg_input,neg_hidden); 03240 if( targetsize() > 0 ) 03241 { 03242 if( use_mean_field_cd ) 03243 neg_target = target_layer->expectation; 03244 else 03245 neg_target = target_layer->sample; 03246 target_layer->update(pos_target,neg_target); 03247 target_connection->update(pos_target,pos_hidden, 03248 neg_target,neg_hidden); 03249 } 03250 } 03251 03252 if( !fast_exact_is_equal(persistent_cd_weight, 0.) ) 03253 { 03254 if( use_mean_field_cd ) 03255 PLERROR("In PseudolikelihoodRBM::train(): Persistent " 03256 "Contrastive Divergence was not implemented for " 03257 "MF-CD"); 03258 03259 if( !fast_exact_is_equal(cd_decrease_ct, 0) ) 03260 lr = cd_learning_rate / (1.0 + stage * cd_decrease_ct ); 03261 else 03262 lr = cd_learning_rate; 03263 03264 if( targetsize() > 0 ) 03265 lr *= generative_learning_weight; 03266 03267 lr *= persistent_cd_weight; 03268 03269 if( weightsize > 0 ) 03270 lr *= weight; 03271 03272 setLearningRate(lr); 03273 03274 int chain_i = stage % n_gibbs_chains; 03275 03276 if( !persistent_gibbs_chain_is_started[chain_i] ) 03277 { 03278 // Start gibbs chain 03279 connection->setAsDownInput( input ); 03280 hidden_layer->getAllActivations( 03281 (RBMMatrixConnection*) connection ); 03282 if( targetsize() == 1 ) 03283 productAcc( hidden_layer->activation, 03284 target_connection->weights, 03285 target_one_hot ); 03286 else if( targetsize() > 1 ) 03287 productAcc( hidden_layer->activation, 03288 target_connection->weights, 03289 target ); 03290 03291 hidden_layer->computeExpectation(); 03292 hidden_layer->generateSample(); 03293 pers_cd_hidden[chain_i] << hidden_layer->sample; 03294 persistent_gibbs_chain_is_started[chain_i] = true; 03295 } 03296 03297 if( fast_exact_is_equal(persistent_cd_weight, 1.) ) 03298 { 03299 // Hidden positive sample was not computed previously 03300 connection->setAsDownInput( input ); 03301 hidden_layer->getAllActivations( 03302 (RBMMatrixConnection*) connection ); 03303 if( targetsize() == 1 ) 03304 productAcc( hidden_layer->activation, 03305 target_connection->weights, 03306 target_one_hot ); 03307 else if( targetsize() > 1 ) 03308 productAcc( hidden_layer->activation, 03309 target_connection->weights, 03310 target ); 03311 03312 hidden_layer->computeExpectation(); 03313 pos_hidden << hidden_layer->expectation; 03314 } 03315 03316 hidden_layer->sample << pers_cd_hidden[chain_i]; 03317 // Prolonged Gibbs chain 03318 for(int i=0; i<cd_n_gibbs; i++) 03319 { 03320 connection->setAsUpInput( hidden_layer->sample ); 03321 input_layer->getAllActivations( 03322 (RBMMatrixConnection*) connection ); 03323 input_layer->computeExpectation(); 03324 // LATERAL CONNECTIONS CODE HERE! 03325 input_layer->generateSample(); 03326 connection->setAsDownInput( input_layer->sample ); 03327 hidden_layer->getAllActivations( 03328 (RBMMatrixConnection*) connection ); 03329 if( targetsize() > 0 ) 03330 { 03331 target_connection->setAsUpInput( hidden_layer->sample ); 03332 target_layer->getAllActivations( 03333 (RBMMatrixConnection*) target_connection ); 03334 target_layer->computeExpectation(); 03335 target_layer->generateSample(); 03336 productAcc( hidden_layer->activation, 03337 target_connection->weights, 03338 target_layer->sample ); 03339 } 03340 hidden_layer->computeExpectation(); 03341 hidden_layer->generateSample(); 03342 } 03343 03344 pers_cd_hidden[chain_i] << hidden_layer->sample; 03345 03346 input_layer->update(input, input_layer->sample); 03347 hidden_layer->update(pos_hidden,hidden_layer->expectation); 03348 connection->update(input,pos_hidden, 03349 input_layer->sample, 03350 hidden_layer->expectation); 03351 if( targetsize() > 0 ) 03352 { 03353 target_layer->update(target_one_hot, target_layer->sample); 03354 target_connection->update(target_one_hot,pos_hidden, 03355 target_layer->sample, 03356 hidden_layer->expectation); 03357 } 03358 } 03359 } 03360 } 03361 03362 if( !fast_exact_is_equal(denoising_learning_rate, 0.) && 03363 (targetsize() == 0 || generative_learning_weight > 0) ) 03364 { 03365 if( !fast_exact_is_equal(denoising_decrease_ct, 0) ) 03366 lr = denoising_learning_rate / 03367 (1.0 + stage * denoising_decrease_ct ); 03368 else 03369 lr = denoising_learning_rate; 03370 03371 if( targetsize() > 0 ) 03372 lr *= generative_learning_weight; 03373 03374 if( weightsize > 0 ) 03375 lr *= weight; 03376 03377 setLearningRate(lr); 03378 if( targetsize() > 0 ) 03379 PLERROR("In PseudolikelihoodRBM::train(): denoising " 03380 "autoencoder training is not implemented for " 03381 "targetsize() > 0"); 03382 03383 if( input_is_sparse ) 03384 PLERROR("In PseudolikelihoodRBM::train(): denoising autoencoder " 03385 "training is not implemented for sparse inputs"); 03386 03387 03388 if( fraction_of_masked_inputs > 0 ) 03389 random_gen->shuffleElements(autoencoder_input_indices); 03390 03391 masked_autoencoder_input << input; 03392 if( fraction_of_masked_inputs > 0 ) 03393 { 03394 for( int j=0 ; 03395 j < round(fraction_of_masked_inputs*input_layer->size) ; 03396 j++) 03397 masked_autoencoder_input[ autoencoder_input_indices[j] ] = 0; 03398 } 03399 03400 // Somehow, doesn't compile without the fancy casts... 03401 ((RBMMatrixConnection *)connection)->RBMConnection::fprop( masked_autoencoder_input, 03402 hidden_layer->activation ); 03403 03404 hidden_layer->fprop( hidden_layer->activation, 03405 hidden_layer->expectation ); 03406 03407 transpose_connection->fprop( hidden_layer->expectation, 03408 input_layer->activation ); 03409 input_layer->fprop( input_layer->activation, 03410 input_layer->expectation ); 03411 input_layer->setExpectation( input_layer->expectation ); 03412 03413 real cost = input_layer->fpropNLL(input); 03414 03415 input_layer->bpropNLL(input, cost, 03416 reconstruction_activation_gradient); 03417 if( only_reconstruct_masked_inputs && 03418 fraction_of_masked_inputs > 0 ) 03419 { 03420 for( int j=(int)round(fraction_of_masked_inputs*input_layer->size) ; 03421 j < input_layer->size ; 03422 j++) 03423 reconstruction_activation_gradient[ 03424 autoencoder_input_indices[j] ] = 0; 03425 } 03426 input_layer->update( reconstruction_activation_gradient ); 03427 03428 transpose_connection->bpropUpdate( 03429 hidden_layer->expectation, 03430 input_layer->activation, 03431 hidden_layer_expectation_gradient, 03432 reconstruction_activation_gradient ); 03433 03434 hidden_layer->bpropUpdate( hidden_layer->activation, 03435 hidden_layer->expectation, 03436 hidden_layer_activation_gradient, 03437 hidden_layer_expectation_gradient ); 03438 03439 connection->bpropUpdate( masked_autoencoder_input, 03440 hidden_layer->activation, 03441 reconstruction_activation_gradient, // is not used afterwards... 03442 hidden_layer_activation_gradient ); 03443 } 03444 03445 // } 03446 train_stats->update( train_costs ); 03447 03448 } 03449 03450 Profiler::end("training"); 03451 const Profiler::Stats& stats = Profiler::getStats("training"); 03452 real ticksPerSec = Profiler::ticksPerSecond(); 03453 real cpu_time = (stats.user_duration+stats.system_duration)/ticksPerSec; 03454 cumulative_training_time += cpu_time; 03455 03456 train_costs.fill(MISSING_VALUE); 03457 train_costs[training_cpu_time_cost_index] = cpu_time; 03458 train_costs[cumulative_training_time_cost_index] = cumulative_training_time; 03459 train_stats->update( train_costs ); 03460 03461 //cout << "mean_pseudolikelihood=" << mean_pseudolikelihood / (stage - init_stage) << endl; 03462 // Sums to 1 test 03463 //compute_Z(); 03464 //conf.resize( input_layer->size ); 03465 //Vec output,costs; 03466 //output.resize(outputsize()); 03467 //costs.resize(getTestCostNames().length()); 03468 //target.resize( targetsize() ); 03469 //real sums = 0; 03470 //int input_n_conf = input_layer->getConfigurationCount(); 03471 //for(int i=0; i<input_n_conf; i++) 03472 //{ 03473 // input_layer->getConfiguration(i,conf); 03474 // computeOutput(conf,output); 03475 // computeCostsFromOutputs( conf, output, target, costs ); 03476 // if( i==0 ) 03477 // sums = -costs[nll_cost_index]; 03478 // else 03479 // sums = logadd( sums, -costs[nll_cost_index] ); 03480 // //sums += safeexp( -costs[nll_cost_index] ); 03481 //} 03482 //cout << "sums: " << safeexp(sums) << endl; 03483 // //sums << endl; 03484 train_stats->finalize(); 03485 } 03486 03487 void PseudolikelihoodRBM::test(VMat testset, PP<VecStatsCollector> test_stats, 03488 VMat testoutputs, VMat testcosts) const 03489 { 03490 if( !input_is_sparse ) 03491 { 03492 inherited::test( testset, test_stats, testoutputs, testcosts ); 03493 return; 03494 } 03495 03496 Profiler::pl_profile_start("PLearner::test"); 03497 03498 int len = testset.length(); 03499 Vec input; 03500 Vec target; 03501 Vec extra; 03502 real weight; 03503 int out_size = outputsize() >= 0 ? outputsize() : 0; 03504 int target_index; 03505 03506 if( targetsize() <= 0 ) 03507 PLERROR("PseudolikelihoodRBM::test(): targetsize() must be " 03508 "> 0 for sparse inputs"); 03509 03510 Vec output(out_size); 03511 Vec costs(nTestCosts()); 03512 03513 if (test_stats) { 03514 // Set names of test_stats costs 03515 test_stats->setFieldNames(getTestCostNames()); 03516 03517 if (len == 0) { 03518 // Empty test set: we give -1 cost arbitrarily. 03519 costs.fill(-1); 03520 test_stats->update(costs); 03521 } 03522 } 03523 03524 PP<ProgressBar> pb; 03525 if (report_progress) 03526 pb = new ProgressBar("Testing learner", len); 03527 03528 PP<PRandom> copy_random_gen=0; 03529 if (use_a_separate_random_generator_for_testing && random_gen) 03530 { 03531 CopiesMap copies; 03532 copy_random_gen = random_gen->deepCopy(copies); 03533 random_gen->manual_seed(use_a_separate_random_generator_for_testing); 03534 } 03535 03536 Vec target_act = target_layer->activation; 03537 Vec hidden_act = hidden_layer->activation; 03538 for (int l = 0; l < len; l++) 03539 { 03540 testset.getExample(l, input, target, weight); 03541 testset->getExtra(l, extra ); 03542 03543 if( targetsize() == 1 ) 03544 { 03545 03546 target_one_hot.clear(); 03547 target_index = (int)round( target[0] ); 03548 target_one_hot[ target_index ] = 1; 03549 03550 if( factorized_connection_rank > 0 ) 03551 { 03552 Vx.clear(); 03553 for( int e=0; e<extra.length(); e++ ) 03554 Vx += V((int)extra[e]); 03555 03556 product(hidden_act,U,Vx); 03557 } 03558 else 03559 { 03560 hidden_act.clear(); 03561 for( int e=0; e<extra.length(); e++ ) 03562 hidden_act += V((int)extra[e]); 03563 } 03564 hidden_act += hidden_layer->bias; 03565 03566 for( int i=0 ; i<target_layer->size ; i++ ) 03567 { 03568 target_act[i] = target_layer->bias[i]; 03569 // LATERAL CONNECTIONS CODE HERE!! 03570 real *w = &(target_connection->weights(0,i)); 03571 // step from one row to the next in weights matrix 03572 int m = target_connection->weights.mod(); 03573 03574 for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 03575 { 03576 // *w = weights(j,i) 03577 hidden_activation_pos_i[j] = hidden_act[j] + *w; 03578 } 03579 target_act[i] -= hidden_layer->freeEnergyContribution( 03580 hidden_activation_pos_i); 03581 } 03582 03583 target_layer->expectation_is_up_to_date = false; 03584 target_layer->computeExpectation(); 03585 output << target_layer->expectation; 03586 real nll = target_layer->fpropNLL(target_one_hot); 03587 costs.fill( MISSING_VALUE ); 03588 costs[nll_cost_index] = nll; 03589 costs[class_cost_index] = 03590 (argmax(target_layer->expectation) == target_index)? 0 : 1; 03591 } 03592 else if( targetsize() > 1 ) 03593 PLERROR("PseudolikelihoodRBM::test(): targetsize() > 1 " 03594 "not implemented yet for sparse inputs"); 03595 costs[cumulative_training_time_cost_index] = cumulative_training_time; 03596 if (testoutputs) testoutputs->putOrAppendRow(l, output); 03597 if (testcosts) testcosts->putOrAppendRow(l, costs); 03598 if (test_stats) test_stats->update(costs, weight); 03599 if (report_progress) pb->update(l); 03600 } 03601 03602 if (use_a_separate_random_generator_for_testing && random_gen) 03603 *random_gen = *copy_random_gen; 03604 03605 Profiler::pl_profile_end("PLearner::test"); 03606 03607 } 03608 03610 // computeOutput // 03612 void PseudolikelihoodRBM::computeOutput(const Vec& input, Vec& output) const 03613 { 03614 if( input_is_sparse ) 03615 PLERROR("In PseudolikelihoodRBM::computeOutput(): " 03616 "not compatible with sparse inputs"); 03617 03618 // Compute the output from the input. 03619 if( targetsize() == 1 ) 03620 { 03621 // Get output probabilities 03622 connection->setAsDownInput( input ); 03623 hidden_layer->getAllActivations( 03624 (RBMMatrixConnection*) connection ); 03625 03626 Vec target_act = target_layer->activation; 03627 Vec hidden_act = hidden_layer->activation; 03628 for( int i=0 ; i<target_layer->size ; i++ ) 03629 { 03630 target_act[i] = target_layer->bias[i]; 03631 // LATERAL CONNECTIONS CODE HERE!! 03632 real *w = &(target_connection->weights(0,i)); 03633 // step from one row to the next in weights matrix 03634 int m = target_connection->weights.mod(); 03635 03636 for( int j=0 ; j<hidden_layer->size ; j++, w+=m ) 03637 { 03638 // *w = weights(j,i) 03639 hidden_activation_pos_i[j] = hidden_act[j] + *w; 03640 } 03641 target_act[i] -= hidden_layer->freeEnergyContribution( 03642 hidden_activation_pos_i); 03643 } 03644 03645 target_layer->expectation_is_up_to_date = false; 03646 target_layer->computeExpectation(); 03647 output << target_layer->expectation; 03648 } 03649 else if(targetsize() > 1 ) 03650 { 03651 PLERROR("In PseudolikelihoodRBM::computeOutput(): not implemented yet for\n" 03652 "targetsize() > 1"); 03653 } 03654 else 03655 { 03656 // Get hidden layer representation 03657 connection->setAsDownInput( input ); 03658 hidden_layer->getAllActivations( (RBMMatrixConnection *) connection ); 03659 hidden_layer->computeExpectation(); 03660 output << hidden_layer->expectation; 03661 } 03662 } 03663 03664 03665 void PseudolikelihoodRBM::computeCostsFromOutputs(const Vec& input, 03666 const Vec& output, 03667 const Vec& target, 03668 Vec& costs) const 03669 { 03670 03671 if( input_is_sparse ) 03672 PLERROR("In PseudolikelihoodRBM::computeCostsFromOutputs(): " 03673 "not compatible with sparse inputs"); 03674 03675 // Compute the costs from *already* computed output. 03676 costs.resize( cost_names.length() ); 03677 costs.fill( MISSING_VALUE ); 03678 03679 if( targetsize() == 1 ) 03680 { 03681 if( !is_missing(target[0]) ) 03682 { 03683 costs[class_cost_index] = 03684 (argmax(output) == (int) round(target[0]))? 0 : 1; 03685 costs[nll_cost_index] = -pl_log(output[(int) round(target[0])]); 03686 } 03687 } 03688 else if( targetsize() > 1 ) 03689 { 03690 PLERROR("In PseudolikelihoodRBM::computeCostsFromOutputs(): not implemented yet for\n" 03691 "targetsize() > 1"); 03692 } 03693 else 03694 { 03695 if( compute_input_space_nll ) 03696 { 03697 compute_Z(); 03698 connection->setAsDownInput( input ); 03699 hidden_layer->getAllActivations( (RBMMatrixConnection *) connection ); 03700 costs[nll_cost_index] = hidden_layer->freeEnergyContribution( 03701 hidden_layer->activation) - dot(input,input_layer->bias); 03702 if( compute_Z_exactly ) 03703 costs[nll_cost_index] += log_Z; 03704 else if( use_ais_to_compute_Z ) 03705 costs[nll_cost_index] += log_Z_ais; 03706 else 03707 PLERROR("In PseudolikelihoodRBM::computeCostsFromOutputs(): " 03708 "can't compute NLL without a mean to compute log(Z)."); 03709 03710 if( compute_Z_exactly ) 03711 { 03712 costs[log_Z_cost_index] = log_Z; 03713 } 03714 if( use_ais_to_compute_Z ) 03715 { 03716 costs[log_Z_ais_cost_index] = log_Z_ais; 03717 costs[log_Z_interval_lower_cost_index] = log_Z_down; 03718 costs[log_Z_interval_upper_cost_index] = log_Z_up; 03719 } 03720 } 03721 } 03722 costs[cumulative_training_time_cost_index] = cumulative_training_time; 03723 } 03724 03725 TVec<string> PseudolikelihoodRBM::getTestCostNames() const 03726 { 03727 // Return the names of the costs computed by computeCostsFromOutputs 03728 // (these may or may not be exactly the same as what's returned by 03729 // getTrainCostNames). 03730 03731 return cost_names; 03732 } 03733 03734 TVec<string> PseudolikelihoodRBM::getTrainCostNames() const 03735 { 03736 return cost_names; 03737 } 03738 03739 03740 //##### Helper functions ################################################## 03741 03742 void PseudolikelihoodRBM::setLearningRate( real the_learning_rate ) 03743 { 03744 input_layer->setLearningRate( the_learning_rate ); 03745 hidden_layer->setLearningRate( the_learning_rate ); 03746 if( connection ) 03747 connection->setLearningRate( the_learning_rate ); 03748 if( target_layer ) 03749 target_layer->setLearningRate( the_learning_rate ); 03750 if( target_connection ) 03751 target_connection->setLearningRate( the_learning_rate ); 03752 } 03753 03754 void PseudolikelihoodRBM::compute_Z() const 03755 { 03756 03757 int input_n_conf = input_layer->getConfigurationCount(); 03758 int hidden_n_conf = hidden_layer->getConfigurationCount(); 03759 if( !Z_is_up_to_date && compute_Z_exactly && 03760 input_n_conf == RBMLayer::INFINITE_CONFIGURATIONS && 03761 hidden_n_conf == RBMLayer::INFINITE_CONFIGURATIONS ) 03762 PLERROR("In PseudolikelihoodRBM::computeCostsFromOutputs: " 03763 "RBM's input and hidden layers are too big " 03764 "for exact NLL computations."); 03765 03766 if( !Z_ais_is_up_to_date && use_ais_to_compute_Z ) 03767 { 03768 log_Z_ais = 0; 03769 // This AIS code is based on the Matlab code of Russ, on his web page // 03770 03771 // Compute base-rate RBM biases 03772 Vec input( inputsize() ); 03773 Vec target( targetsize() ); 03774 real weight; 03775 Vec base_rate_rbm_bias( inputsize() ); 03776 base_rate_rbm_bias.clear(); 03777 for( int i=0; i<train_set->length(); i++ ) 03778 { 03779 train_set->getExample(i, input, target, weight); 03780 base_rate_rbm_bias += input; 03781 } 03782 base_rate_rbm_bias += 0.05*train_set->length(); 03783 base_rate_rbm_bias /= 1.05*train_set->length(); 03784 for( int j=0; j<inputsize(); j++ ) 03785 base_rate_rbm_bias[j] = pl_log( base_rate_rbm_bias[j] ) - 03786 pl_log( 1-base_rate_rbm_bias[j] ); 03787 03788 Mat ais_chain_init_samples( n_ais_chains,inputsize() ); 03789 Vec ais_weights( n_ais_chains ); 03790 ais_weights.clear(); // we'll work on log-scale 03791 real beg_beta, end_beta, beta, step_beta; 03792 int n_beta; 03793 03794 // Start chains 03795 real p_j; 03796 for( int j=0; j<input_layer->size; j++ ) 03797 { 03798 p_j = sigmoid( base_rate_rbm_bias[j] ); 03799 for( int c=0; c<n_ais_chains; c++ ) 03800 ais_chain_init_samples(c,j) = random_gen->binomial_sample( p_j ); 03801 } 03802 input_layer->setBatchSize( n_ais_chains ); 03803 input_layer->samples << ais_chain_init_samples; 03804 03805 // Add importance weight contribution (denominator) 03806 productScaleAcc( ais_weights, input_layer->samples, false, 03807 base_rate_rbm_bias, -1, 0 ); 03808 ais_weights -= hidden_layer->size * pl_log(2); 03809 for( int k=0; k<ais_beta_n_steps.length(); k++ ) 03810 { 03811 beg_beta = (k==0) ? 0 : ais_beta_begin[k]; 03812 end_beta = (k == ais_beta_end.length()-1) ? 1 : ais_beta_end[k]; 03813 if( beg_beta >= end_beta ) 03814 PLERROR("In PseudolikelihoodRBM::compute_Z(): " 03815 "the AIS beta schedule is not monotonically increasing."); 03816 03817 n_beta = ais_beta_n_steps[k]; 03818 if( n_beta == 0) 03819 PLERROR("In PseudolikelihoodRBM::compute_Z(): " 03820 "one of the beta intervals has 0 steps."); 03821 step_beta = (end_beta - beg_beta)/n_beta; 03822 03823 beta = beg_beta; 03824 for( int k_i=0; k_i < n_beta; k_i++ ) 03825 { 03826 beta += step_beta; 03827 // Add importance weight contribution (numerator) 03828 productScaleAcc( ais_weights, input_layer->samples, false, 03829 base_rate_rbm_bias, (1-beta), 1 ); 03830 productScaleAcc( ais_weights, input_layer->samples, false, 03831 input_layer->bias, beta, 1 ); 03832 connection->setAsDownInputs(input_layer->samples); 03833 hidden_layer->getAllActivations( 03834 (RBMMatrixConnection *) connection, 0, true ); 03835 hidden_layer->activations *= beta; 03836 for( int c=0; c<n_ais_chains; c++ ) 03837 ais_weights[c] -= hidden_layer->freeEnergyContribution( 03838 hidden_layer->activations(c) ); 03839 // Get new chain sample 03840 hidden_layer->computeExpectations(); 03841 hidden_layer->generateSamples(); 03842 connection->setAsUpInputs(hidden_layer->samples); 03843 input_layer->getAllActivations( 03844 (RBMMatrixConnection *) connection, 0, true ); 03845 for( int c=0; c<n_ais_chains; c++ ) 03846 multiplyScaledAdd(base_rate_rbm_bias,beta, 03847 (1-beta),input_layer->activations(c)); 03848 input_layer->computeExpectations(); 03849 input_layer->generateSamples(); 03850 03851 // Add importance weight contribution (denominator) 03852 productScaleAcc( ais_weights, input_layer->samples, false, 03853 base_rate_rbm_bias, -(1-beta), 1 ); 03854 productScaleAcc( ais_weights, input_layer->samples, false, 03855 input_layer->bias, -beta, 1 ); 03856 connection->setAsDownInputs(input_layer->samples); 03857 hidden_layer->getAllActivations( 03858 (RBMMatrixConnection *) connection, 0, true ); 03859 hidden_layer->activations *= beta; 03860 for( int c=0; c<n_ais_chains; c++ ) 03861 ais_weights[c] += hidden_layer->freeEnergyContribution( 03862 hidden_layer->activations(c) ); 03863 } 03864 } 03865 // Final importance weight contribution, at beta=1 (numerator) 03866 productScaleAcc( ais_weights, input_layer->samples, false, 03867 input_layer->bias, 1, 1 ); 03868 connection->setAsDownInputs(input_layer->samples); 03869 hidden_layer->getAllActivations( 03870 (RBMMatrixConnection *) connection, 0, true ); 03871 for( int c=0; c<n_ais_chains; c++ ) 03872 ais_weights[c] -= hidden_layer->freeEnergyContribution( 03873 hidden_layer->activations(c) ); 03874 03875 real log_r_ais = logadd(ais_weights) - pl_log(n_ais_chains); 03876 real log_Z_base = hidden_layer->size * pl_log(2); 03877 for( int j=0; j<inputsize(); j++ ) 03878 log_Z_base += softplus(base_rate_rbm_bias[j]); 03879 log_Z_ais = log_r_ais + log_Z_base; 03880 03881 real offset = mean(ais_weights); 03882 PP<StatsCollector> stats = new StatsCollector(); 03883 stats->forget(); 03884 for( int c=0; c<n_ais_chains; c++ ) 03885 stats->update(exp(ais_weights[c]-offset),1.); 03886 stats->finalize(); 03887 real logstd_ais = pl_log(stats->getStat("STDDEV")) + 03888 offset - pl_log(n_ais_chains)/2; 03889 log_Z_up = pl_log(exp(log_r_ais)+exp(logstd_ais)*3) + log_Z_base; 03890 log_Z_down = pl_log(exp(log_r_ais)-exp(logstd_ais)*3) + log_Z_base; 03891 03892 Z_ais_is_up_to_date = true; 03893 } 03894 if( !Z_is_up_to_date && compute_Z_exactly ) 03895 { 03896 log_Z = 0; 03897 if( input_n_conf < hidden_n_conf ) 03898 { 03899 conf.resize( input_layer->size ); 03900 for(int i=0; i<input_n_conf; i++) 03901 { 03902 input_layer->getConfiguration(i,conf); 03903 connection->setAsDownInput( conf ); 03904 hidden_layer->getAllActivations( (RBMMatrixConnection *) connection ); 03905 if( i == 0 ) 03906 log_Z = -hidden_layer->freeEnergyContribution( 03907 hidden_layer->activation) + dot(conf,input_layer->bias); 03908 else 03909 log_Z = logadd(-hidden_layer->freeEnergyContribution( 03910 hidden_layer->activation) 03911 + dot(conf,input_layer->bias), 03912 log_Z); 03913 } 03914 } 03915 else 03916 { 03917 conf.resize( hidden_layer->size ); 03918 for(int i=0; i<hidden_n_conf; i++) 03919 { 03920 hidden_layer->getConfiguration(i,conf); 03921 connection->setAsUpInput( conf ); 03922 input_layer->getAllActivations( (RBMMatrixConnection *) connection ); 03923 if( i == 0 ) 03924 log_Z = -input_layer->freeEnergyContribution( 03925 input_layer->activation) + dot(conf,hidden_layer->bias); 03926 else 03927 log_Z = logadd(-input_layer->freeEnergyContribution( 03928 input_layer->activation) 03929 + dot(conf,hidden_layer->bias), 03930 log_Z); 03931 } 03932 } 03933 Z_is_up_to_date = true; 03934 } 03935 } 03936 03937 } // end of namespace PLearn 03938 03939 03940 /* 03941 Local Variables: 03942 mode:c++ 03943 c-basic-offset:4 03944 c-file-style:"stroustrup" 03945 c-file-offsets:((innamespace . 0)(inline-open . 0)) 03946 indent-tabs-mode:nil 03947 fill-column:79 03948 End: 03949 */ 03950 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :