// -*- C++ -*-

// ModuleLearner.cc
//
// Copyright (C) 2007 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Olivier Delalleau

#define PL_LOG_MODULE_NAME "ModuleLearner"

#include "ModuleLearner.h"
#include <plearn_learners/online/NullModule.h>
#include <plearn/io/pl_log.h>
#include <plearn/var/SumOverBagsVariable.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    ModuleLearner,
    "A PLearner that contains a single OnlineLearningModule.\n",
    "That module should have ports that can be fed with the input, target\n"
    "and weight of an example (defined by the 'input_ports', 'target_ports'\n"
    "and 'weight_ports' options), ports that compute costs (defined by the\n"
    "'cost_ports' option), and a port that computes the output of this\n"
    "learner (whose name is given by the 'output_port' option).\n"
    "\n"
    "For example one can use a NetworkModule, which can define such ports.\n"
    "\n"
    "The input and target from the training VMatrix are plugged into their\n"
    "corresponding ports, and the output (for computeOutput) and costs (for\n"
    "computeOutputAndCosts and for training) are obtained from the 'output'\n"
    "port and the ports defined by the 'cost_ports' option.\n"
    "\n"
    "During training, the gradient is propagated from the first cost (only),\n"
    "and the bpropUpdate() method of the module is called (possibly on one\n"
    "mini-batch of examples at a time) so as to update the internal\n"
    "parameters of the module. During computeOutput, it is not necessary to\n"
    "provide a target in order to obtain an output.\n"
);
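// A hypothetical usage sketch (illustrative only, not taken from the PLearn
// distribution): a ModuleLearner wrapping a module whose ports match the
// default option values. The NetworkModule contents and the batch_size value
// are assumptions made for the sake of the example.
//
//   ModuleLearner(
//       module = NetworkModule( ... );  // must expose the ports below
//       input_ports = [ "input" ];
//       target_ports = [ "target" ];
//       cost_ports = [ "cost" ];   // the first cost is the one optimized
//       output_port = "output";
//       batch_size = 32;           // 0 would mean full-batch updates
//   )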
///////////////////
// ModuleLearner //
///////////////////
ModuleLearner::ModuleLearner():
    batch_size(1),
    cost_ports(TVec<string>(1, "cost")),
    input_ports(TVec<string>(1, "input")),
    target_ports(TVec<string>(1, "target")),
    output_port("output"),
    // Note: many learners do not use weights, thus the default behavior is
    // not to have a 'weight' port in 'weight_ports'.
    operate_on_bags(false),
    reset_seed_upon_train(0),
    mbatch_size(-1)
{
    random_gen = new PRandom();
    test_minibatch_size = 1000;
}

////////////////////
// declareOptions //
////////////////////
void ModuleLearner::declareOptions(OptionList& ol)
{
    declareOption(ol, "module", &ModuleLearner::module,
                  OptionBase::buildoption,
        "The module being optimized.");

    declareOption(ol, "batch_size", &ModuleLearner::batch_size,
                  OptionBase::buildoption,
        "User-specified number of samples fed to the network at each\n"
        "iteration of learning. Use '0' for full batch learning.");

    declareOption(ol, "reset_seed_upon_train",
                  &ModuleLearner::reset_seed_upon_train,
                  OptionBase::buildoption,
        "Whether to reset the random generator seed upon starting the train\n"
        "method. If positive, this is the seed. If -1, use the value of the\n"
        "option 'use_a_separate_random_generator_for_testing'.\n");

    declareOption(ol, "cost_ports", &ModuleLearner::cost_ports,
                  OptionBase::buildoption,
        "List of ports that contain costs being computed (the first cost is\n"
        "also the only one being optimized by this learner).");

    declareOption(ol, "input_ports", &ModuleLearner::input_ports,
                  OptionBase::buildoption,
        "List of ports that take the input part of a sample as input.");

    declareOption(ol, "target_ports", &ModuleLearner::target_ports,
                  OptionBase::buildoption,
        "List of ports that take the target part of a sample as input.");

    declareOption(ol, "weight_ports", &ModuleLearner::weight_ports,
                  OptionBase::buildoption,
        "List of ports that take the weight part of a sample as input.");

    declareOption(ol, "output_port", &ModuleLearner::output_port,
                  OptionBase::buildoption,
        "The port that will contain the output of the learner.");

    declareOption(ol, "operate_on_bags", &ModuleLearner::operate_on_bags,
                  OptionBase::buildoption,
        "If true, then each training step will be done on batch_size *bags*\n"
        "of samples (instead of batch_size samples).");

    declareOption(ol, "mbatch_size", &ModuleLearner::mbatch_size,
                  OptionBase::learntoption,
        "Effective 'batch_size': it takes the same value as 'batch_size',\n"
        "except when 'batch_size' is set to 0, in which case this option\n"
        "takes the value of the size of the training set.");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);
}
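// For instance (values are illustrative): with a 1000-example training set,
// batch_size = 0 yields mbatch_size = 1000, i.e. one full-batch update every
// 1000 stages, while the default batch_size = 1 performs purely online
// updates, one example per stage.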
////////////
// build_ //
////////////
void ModuleLearner::build_()
{
    if (!module)
        // Cannot do anything without an underlying module.
        return;

    // Forward random number generator to underlying module.
    if (!module->random_gen) {
        module->random_gen = random_gen;
        module->build();
        module->forget();
    }

    // Create a new NetworkModule that connects the ports of the underlying
    // module to simple MatrixModules that will provide/store data.
    const TVec<string>& ports = module->getPorts();
    TVec< PP<OnlineLearningModule> > all_modules;
    all_modules.append(module);
    TVec< PP<NetworkConnection> > all_connections;
    store_inputs = store_targets = store_weights = NULL;

    for (int i = 0; i < input_ports.length(); i++) {
        if (!store_inputs) {
            store_inputs = new MatrixModule("store_inputs", true);
            all_modules.append(get_pointer(store_inputs));
        }
        all_connections.append(new NetworkConnection(
            get_pointer(store_inputs), "data",
            module, input_ports[i], false));
    }

    for (int i = 0; i < target_ports.length(); i++) {
        if (!store_targets) {
            store_targets = new MatrixModule("store_targets", true);
            all_modules.append(get_pointer(store_targets));
        }
        all_connections.append(new NetworkConnection(
            get_pointer(store_targets), "data",
            module, target_ports[i], false));
    }

    for (int i = 0; i < weight_ports.length(); i++) {
        if (!store_weights) {
            store_weights = new MatrixModule("store_weights", true);
            all_modules.append(get_pointer(store_weights));
        }
        all_connections.append(new NetworkConnection(
            get_pointer(store_weights), "data",
            module, weight_ports[i], false));
    }

    if (ports.find(output_port) >= 0) {
        store_outputs = new MatrixModule("store_outputs", true);
        all_modules.append(get_pointer(store_outputs));
        all_connections.append(new NetworkConnection(
            module, output_port,
            get_pointer(store_outputs), "data", false));
    } else
        store_outputs = NULL;

    store_costs.resize(0);
    for (int i = 0; i < cost_ports.length(); i++) {
        const string& cost_port = cost_ports[i];
        PLCHECK( ports.find(cost_port) >= 0 );
        PP<MatrixModule> store =
            new MatrixModule("store_costs_" + tostring(i), true);
        all_modules.append(get_pointer(store));
        // Note that only the first connection propagates the gradient (we
        // only optimize the first cost).
        all_connections.append(new NetworkConnection(
            module, cost_port,
            get_pointer(store), "data", i == 0));
        store_costs.append(store);
    }

    network = new NetworkModule();
    network->modules = all_modules;
    network->connections = all_connections;
    network->build();

    // Initialize the list of null pointers used for forward and backward
    // propagation.
    null_pointers.resize(module->nPorts());
    null_pointers.fill(NULL);
}
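// With the default ports, the bridging network assembled above can be
// sketched as follows (commentary only; the '*' edge is the single
// connection created with its gradient-propagation flag set to true):
//
//   store_inputs.data  ----> module.input
//   store_targets.data ----> module.target
//   module.output      ----> store_outputs.data
//   module.cost        --*-> store_costs_0.data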
///////////
// build //
///////////
void ModuleLearner::build()
{
    inherited::build();
    build_();
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void ModuleLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(module, copies);
    deepCopyField(cost_ports, copies);
    deepCopyField(input_ports, copies);
    deepCopyField(target_ports, copies);
    deepCopyField(weight_ports, copies);
    deepCopyField(store_inputs, copies);
    deepCopyField(store_targets, copies);
    deepCopyField(store_weights, copies);
    deepCopyField(store_outputs, copies);
    deepCopyField(store_costs, copies);
    deepCopyField(network, copies);
    deepCopyField(null_pointers, copies);
    deepCopyField(all_ones, copies);
    deepCopyField(tmp_costs, copies);
}

////////////////
// outputsize //
////////////////
int ModuleLearner::outputsize() const
{
    if (module && store_outputs)
        return module->getPortWidth(output_port);
    else
        return -1; // Undefined.
}

////////////
// forget //
////////////
void ModuleLearner::forget()
{
    inherited::forget();

    if (module)
        module->forget();

    mbatch_size = -1;
}
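// Note on bags, for train() below: when 'operate_on_bags' is true, samples
// are grouped following the usual PLearn bag convention (as used by
// SumOverBagsVariable), where the last target column carries bag flags.
// For instance, a three-sample bag would be encoded as:
//
//   target.lastElement() == 1   // TARGET_COLUMN_FIRST: opens the bag
//   target.lastElement() == 0   // middle sample
//   target.lastElement() == 2   // TARGET_COLUMN_LAST: closes the bag
//
// and a single-sample bag is flagged 3 (FIRST | LAST).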
///////////
// train //
///////////
void ModuleLearner::train()
{
    if (!initTrain())
        return;

    if (reset_seed_upon_train) {
        if (reset_seed_upon_train > 0)
            random_gen->manual_seed(reset_seed_upon_train);
        else if (reset_seed_upon_train == -1)
            random_gen->manual_seed(
                use_a_separate_random_generator_for_testing);
        else
            PLERROR("ModuleLearner::reset_seed_upon_train should be >= -1");
    }
    OnlineLearningModule::during_training = true;

    // Perform training set-dependent initialization here.
    if (batch_size == 0)
        mbatch_size = train_set->length();
    else
        mbatch_size = batch_size;
    if (train_set->weightsize() >= 1 && !store_weights)
        PLWARNING("In ModuleLearner::train - The training set contains "
                  "weights, but the network is not using them");

    Mat inputs, targets;
    Vec weights;
    PP<ProgressBar> pb = NULL;

    // Clear statistics of previous calls.
    train_stats->forget();

    int stage_init = stage;
    if (report_progress)
        pb = new ProgressBar("Training " + classname(), nstages - stage);

    if (operate_on_bags && batch_size > 0)
        while (stage < nstages) {
            // Obtain training samples.
            int sample_start = stage % train_set->length();
            int isample = sample_start;
            inputs.resize(0, 0);
            targets.resize(0, 0);
            weights.resize(0);
            for (int nbags = 0; nbags < mbatch_size; nbags++) {
                int bag_info = 0;
                while (!(bag_info & SumOverBagsVariable::TARGET_COLUMN_LAST)) {
                    PLASSERT( isample < train_set->length() );
                    Vec input, target;
                    real weight;
                    train_set->getExample(isample, input, target, weight);
                    inputs.appendRow(input);
                    targets.appendRow(target);
                    weights.append(weight);
                    bag_info = int(round(target.lastElement()));
                    isample++;
                }
                isample = isample % train_set->length();
            }
            if (stage + inputs.length() > nstages)
                break;
            // Perform a training step.
            trainingStep(inputs, targets, weights);
            // Handle training progress.
            stage += inputs.length();
            if (report_progress)
                pb->update(stage - stage_init);
        }
    else
        while (stage + mbatch_size <= nstages) {
            // Obtain training samples.
            int sample_start = stage % train_set->length();
            train_set->getExamples(sample_start, mbatch_size, inputs, targets,
                                   weights, NULL, true);
            // Perform a training step.
            trainingStep(inputs, targets, weights);
            // Handle training progress.
            stage += mbatch_size;
            if (report_progress)
                pb->update(stage - stage_init);
        }

    if (stage != nstages) {
        if (operate_on_bags && batch_size > 0)
            PLWARNING("In ModuleLearner::train - The network was trained for "
                      "only %d stages (instead of nstages = %d, which could "
                      "not be fulfilled with batch_size of %d bags)",
                      stage, nstages, batch_size);
        else
            PLWARNING("In ModuleLearner::train - The network was trained for "
                      "only %d stages (instead of nstages = %d, which is not "
                      "a multiple of batch_size = %d)",
                      stage, nstages, batch_size);
    }
    OnlineLearningModule::during_training = false;

    // Finalize statistics for this call.
    train_stats->finalize();
}

//////////////////
// trainingStep //
//////////////////
void ModuleLearner::trainingStep(const Mat& inputs, const Mat& targets,
                                 const Vec& weights)
{
    // Fill in the provided batch values (only if they are actually used by
    // the network).
    if (store_inputs)
        store_inputs->setData(inputs);
    if (store_targets)
        store_targets->setData(targets);
    if (store_weights)
        store_weights->setData(weights.toMat(weights.length(), 1));

    // Forward propagation.
    network->fprop(null_pointers);

    // Copy the costs into a single matrix.
    // First compute the total size.
    int cost_size = 0;
    for (int i = 0; i < store_costs.length(); i++)
        cost_size += store_costs[i]->getData().width();
    // Then resize the 'tmp_costs' matrix and fill it.
    tmp_costs.resize(inputs.length(), cost_size);
    int cost_idx = 0;
    for (int i = 0; i < store_costs.length(); i++) {
        const Mat& cost_i = store_costs[i]->getData();
        PLASSERT( cost_i.length() == tmp_costs.length() );
        tmp_costs.subMatColumns(cost_idx, cost_i.width()) << cost_i;
        cost_idx += cost_i.width();
    }

    // Then update the training statistics.
    train_stats->update(tmp_costs);

    // Initialize cost gradients to 1.
    // Note that we may not need to re-do it at every iteration, but this is
    // so cheap it should not impact performance.
    if (!store_costs.isEmpty())
        store_costs[0]->setGradientTo(1);

    // Backpropagation.
    network->bpropAccUpdate(null_pointers, null_pointers);
}
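// A rough sketch of what one trainingStep() call amounts to. The exact
// semantics of passing null port pointers is an assumption here (they appear
// to make the NetworkModule fall back on the storage of the MatrixModules
// wired in build_()):
//
//   store_*->setData(...);             // fill input/target/weight data
//   network->fprop(null_pointers);     // costs land in store_costs[i]
//   store_costs[0]->setGradientTo(1);  // seed d(cost)/d(cost) = 1
//   network->bpropAccUpdate(null_pointers, null_pointers);  // update module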
///////////////////////////
// computeOutputAndCosts //
///////////////////////////
void ModuleLearner::computeOutputAndCosts(const Vec& input, const Vec& target,
                                          Vec& output, Vec& costs) const
{
    if (store_inputs)
        store_inputs->setData(input.toMat(1, input.length()));
    if (store_targets)
        store_targets->setData(target.toMat(1, target.length()));
    if (store_weights) {
        all_ones.resize(1, 1);
        all_ones(0, 0) = 1;
        store_weights->setData(all_ones);
    }

    // Forward propagation.
    network->fprop(null_pointers);

    // Store output.
    if (store_outputs) {
        const Mat& net_out = store_outputs->getData();
        PLASSERT( net_out.length() == 1 );
        output.resize(net_out.width());
        output << net_out;
    } else
        output.resize(0);

    // Store costs.
    costs.resize(0);
    for (int i = 0; i < store_costs.length(); i++) {
        const Mat& cost_i = store_costs[i]->getData();
        PLASSERT( cost_i.length() == 1 );
        costs.append(cost_i(0));
    }
}

////////////////////////////
// computeOutputsAndCosts //
////////////////////////////
void ModuleLearner::computeOutputsAndCosts(const Mat& input, const Mat& target,
                                           Mat& output, Mat& costs) const
{
    if (store_inputs)
        store_inputs->setData(input);
    if (store_targets)
        store_targets->setData(target);
    if (store_weights) {
        if (all_ones.width() != 1 || all_ones.length() != input.length()) {
            all_ones.resize(input.length(), 1);
            all_ones.fill(1.0);
        }
        store_weights->setData(all_ones);
    }

    // Make 'store_outputs' temporarily point to 'output', to avoid copying
    // the network output.
    Mat old_net_out;
    Mat* net_out = store_outputs ? &store_outputs->getData() : NULL;
    output.resize(input.length(), outputsize() >= 0 ? outputsize() : 0);
    if (net_out) {
        old_net_out = *net_out;
        *net_out = output;
    }

    // Forward propagation.
    network->fprop(null_pointers);

    // Restore 'store_outputs'.
    if (net_out)
        *net_out = old_net_out;

    if (store_costs.isEmpty()) {
        // Do not bother with costs.
        costs.resize(input.length(), 0);
        return;
    }

    // Copy costs.
    // Note that a more efficient implementation could be used when only one
    // cost is computed (see code in previous version).
    // First compute the total size.
    int cost_size = 0;
    for (int i = 0; i < store_costs.length(); i++)
        cost_size += store_costs[i]->getData().width();
    // Then resize the 'costs' matrix and fill it.
    costs.resize(input.length(), cost_size);
    int cost_idx = 0;
    for (int i = 0; i < store_costs.length(); i++) {
        const Mat& cost_i = store_costs[i]->getData();
        PLASSERT( cost_i.length() == costs.length() );
        costs.subMatColumns(cost_idx, cost_i.width()) << cost_i;
        cost_idx += cost_i.width();
    }
}
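// Minimal calling sketch for the batch version above (hypothetical 'learner'
// object and sizes; 'n' is an arbitrary number of test examples):
//
//   Mat inputs(n, learner->inputsize());
//   Mat targets(n, learner->targetsize());
//   Mat outputs, costs;   // resized by the call
//   learner->computeOutputsAndCosts(inputs, targets, outputs, costs);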
///////////////////
// computeOutput //
///////////////////
void ModuleLearner::computeOutput(const Vec& input, Vec& output) const
{
    // Inefficient implementation.
    Vec target(targetsize(), MISSING_VALUE);
    Vec costs;
    computeOutputAndCosts(input, target, output, costs);
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void ModuleLearner::computeCostsFromOutputs(const Vec& input,
                                            const Vec& output,
                                            const Vec& target,
                                            Vec& costs) const
{
    // Inefficient implementation (recomputes the output too).
    Vec the_output;
    computeOutputAndCosts(input, target, the_output, costs);
#ifdef BOUNDCHECK
    // Ensure the computed output is the same as the one provided to this
    // method.
    PLASSERT( output.length() == the_output.length() );
    for (int i = 0; i < output.length(); i++) {
        PLASSERT( fast_exact_is_equal(output[i], the_output[i]) );
    }
#endif
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> ModuleLearner::getTestCostNames() const
{
    return cost_ports;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> ModuleLearner::getTrainCostNames() const
{
    return cost_ports;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :