// -*- C++ -*-

// CostModule.cc
//
// Copyright (C) 2006 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#include "CostModule.h"
#include <plearn/math/TMat_maths.h>


namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    CostModule,
    "General class representing a cost function module",
    "It usually takes an input and a target, and outputs one cost.\n"
    "It can also output more costs; in that case, the first one is the\n"
    "objective function to be decreased.\n");

CostModule::CostModule() :
    target_size(-1)
{
}

void CostModule::declareOptions(OptionList& ol)
{
    declareOption(ol, "target_size", &CostModule::target_size,
                  OptionBase::buildoption,
                  "Size of the target vectors.");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);

    redeclareOption(ol, "output_size", &CostModule::output_size,
                    OptionBase::buildoption,
                    "Number of costs (outputs).");
}

void CostModule::build_()
{
}

// ### Nothing to add here, simply calls build_
void CostModule::build()
{
    inherited::build();
    build_();
}


/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void CostModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(tmp_costs, copies);
    deepCopyField(tmp_input_and_target, copies);
    deepCopyField(tmp_input_and_target_gradient, copies);
    deepCopyField(tmp_input_and_target_diag_hessian, copies);
    deepCopyField(tmp_costs_mat, copies);
    deepCopyField(tmp_input_gradients, copies);
    deepCopyField(store_costs, copies);
}

////////////////////
// bpropAccUpdate //
////////////////////
void CostModule::bpropAccUpdate(const TVec<Mat*>& ports_value,
                                const TVec<Mat*>& ports_gradient)
{
    if (ports_gradient.length() == 3) {
        Mat* pred_grad   = ports_gradient[0];
        Mat* target_grad = ports_gradient[1];
        Mat* cost_grad   = ports_gradient[2];
        if (!pred_grad && !target_grad) {
            // No gradient is being asked.
            checkProp(ports_gradient);
            return;
        }
        if (pred_grad && !target_grad && cost_grad &&
            pred_grad->isEmpty() && !cost_grad->isEmpty())
        {
            // We can probably use the standard mini-batch bpropUpdate.
            // Currently we allow this only in the case where a single cost is
            // computed. This is because the bpropUpdate method in CostModule
            // takes only the value of the first cost as parameter, and we may
            // need the value of all costs.
            PLASSERT( cost_grad->width() == 1 );
#ifdef BOUNDCHECK
            // The gradient on the cost must be one if we want to re-use
            // exactly the existing code.
            for (int i = 0; i < cost_grad->length(); i++) {
                for (int j = 0; j < cost_grad->width(); j++) {
                    PLASSERT( fast_exact_is_equal((*cost_grad)(i, j), 1) );
                }
            }
#endif
            Mat* cost_val = ports_value[2];
            PLASSERT( cost_val );
            Vec costs_vec;
            if (cost_val->mod() == 1) {
                // We can view the cost column matrix as a vector.
                costs_vec = cost_val->toVec();
            } else {
                // We need to make a copy of the cost.
                store_costs.resize(cost_val->length());
                store_costs << cost_val->column(0);
                costs_vec = store_costs;
            }
            Mat* pred_val   = ports_value[0];
            Mat* target_val = ports_value[1];
            PLASSERT( pred_val && target_val );
            pred_grad->resize(pred_val->length(), pred_val->width());
            bpropUpdate(*pred_val, *target_val, costs_vec, *pred_grad, true);
            checkProp(ports_gradient);
            return;
        }
        if (pred_grad && pred_grad->isEmpty() && !cost_grad) {
            // We are asked to compute a gradient w.r.t. the prediction, but
            // no gradient w.r.t. the output cost is being provided.
            PLERROR("In CostModule::bpropAccUpdate - Module '%s' of class '%s'"
                    " cannot compute a gradient w.r.t. its 'prediction' port "
                    "when no gradient w.r.t. its 'cost' port is being provided"
                    " (if within a NetworkModule, ensure incoming connections "
                    "to '%s.prediction' have their 'propagate_gradient' flag "
                    "set to false, or outgoing connections from '%s.cost' have"
                    " their 'propagate_gradient' flag set to true).",
                    OnlineLearningModule::name.c_str(), classname().c_str(),
                    OnlineLearningModule::name.c_str(),
                    OnlineLearningModule::name.c_str());
        }
    }
    // Try to use the parent's default method.
    inherited::bpropAccUpdate(ports_value, ports_gradient);
}
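// Illustration (hedged sketch, not part of the original source; variable
// names are hypothetical, and 'ports_value' is assumed to have been set up
// as in the port-based fprop sketch further below): to request the gradient
// w.r.t. the prediction, a caller would typically pass an *empty* matrix for
// the 'prediction' gradient (meaning "please fill this in"), no 'target'
// gradient, and a 'cost' gradient filled with ones:
//
//     TVec<Mat*> ports_gradient(3);
//     Mat pred_grad(0, n_pred_columns);  // empty: to be computed
//     Mat cost_grad(batch_size, 1);
//     cost_grad.fill(1);                 // incoming d(total)/d(cost) = 1
//     ports_gradient[0] = &pred_grad;    // "prediction"
//     ports_gradient[1] = NULL;          // "target": no gradient requested
//     ports_gradient[2] = &cost_grad;    // "cost"
//     cost_module->bpropAccUpdate(ports_value, ports_gradient);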
///////////
// fprop //
///////////
void CostModule::fprop(const Vec& input, const Vec& target, Vec& cost) const
{
    PLERROR("CostModule::fprop(const Vec& input, const Vec& target, Vec& cost)"
            "\n"
            "is not implemented. You have to implement it in your class.\n");
}
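// A minimal sketch of such an override (hypothetical subclass, not part of
// this file), assuming a squared-error helper such as powdistance() from
// TMat_maths:
//
//     void MySquaredErrorCost::fprop(const Vec& input, const Vec& target,
//                                    Vec& cost) const
//     {
//         cost.resize(output_size);                 // here, output_size == 1
//         cost[0] = powdistance(input, target, 2);  // squared error
//     }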
void CostModule::fprop(const Mat& inputs, const Mat& targets, Mat& costs) const
{
    PLERROR("In CostModule::fprop - Mini-batch version not implemented for "
            "class %s", classname().c_str());
}

void CostModule::fprop(const Vec& input, const Vec& target, real& cost) const
{
    // Keep only the first cost.
    fprop( input, target, tmp_costs );
    cost = tmp_costs[0];
}

void CostModule::fprop(const Mat& inputs, const Mat& targets, Vec& costs)
{
    // Keep only the first cost.
    tmp_costs_mat.resize(inputs.length(), output_size);
    fprop(inputs, targets, tmp_costs_mat);
    costs.resize(tmp_costs_mat.length());
    costs << tmp_costs_mat.column(0);
}

void CostModule::fprop(const Vec& input_and_target, Vec& output) const
{
    PLASSERT( input_and_target.size() == input_size + target_size );
    fprop( input_and_target.subVec( 0, input_size ),
           input_and_target.subVec( input_size, target_size ),
           output );
}

void CostModule::fprop(const TVec<Mat*>& ports_value)
{
    PLASSERT( ports_value.length() == nPorts() );
    if (ports_value.length() == 3) {
        Mat* prediction = ports_value[0];
        Mat* target     = ports_value[1];
        Mat* cost       = ports_value[2];
        if (prediction && target && cost &&
            !prediction->isEmpty() && !target->isEmpty() && cost->isEmpty())
        {
            // Standard fprop: (prediction, target) -> cost
            fprop(*prediction, *target, *cost);
            return;
        }
    }
    // Default version does not work: try to re-use the parent's default fprop.
    inherited::fprop(ports_value);
}
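// Usage sketch (hedged illustration, not part of the original source;
// 'cost_module' and the sizes are hypothetical): the port-based fprop expects
// full 'prediction' and 'target' matrices and an *empty* 'cost' matrix, which
// signals that the cost must be computed and stored there:
//
//     TVec<Mat*> ports_value(3);
//     Mat pred(batch_size, n_pred_columns);    // batch of predictions
//     Mat tgt(batch_size, n_target_columns);   // batch of targets
//     Mat cost(0, 1);                          // empty: filled by fprop
//     ports_value[0] = &pred;                  // "prediction"
//     ports_value[1] = &tgt;                   // "target"
//     ports_value[2] = &cost;                  // "cost"
//     cost_module->fprop(ports_value);         // one cost row per sample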
//////////////
// getPorts //
//////////////
const TVec<string>& CostModule::getPorts() {
    static TVec<string> default_ports;
    if (default_ports.isEmpty()) {
        default_ports.append("prediction");
        default_ports.append("target");
        default_ports.append("cost");
    }
    return default_ports;
}

//////////////////
// getPortSizes //
//////////////////
const TMat<int>& CostModule::getPortSizes() {
    int n_ports = nPorts();
    if (port_sizes.length() != n_ports) {
        port_sizes.resize(n_ports, 2);
        port_sizes.fill(-1);
        if (n_ports >= 3) {
            PLASSERT( getPorts()[0] == "prediction" &&
                      getPorts()[1] == "target" &&
                      getPorts()[2] == "cost" );
            port_sizes(0, 1) = input_size;
            port_sizes(1, 1) = target_size;
            port_sizes(2, 1) = output_size;
        }
    }
    return port_sizes;
}

/////////////////
// bpropUpdate //
/////////////////
void CostModule::bpropUpdate(const Vec& input, const Vec& target, real cost,
                             Vec& input_gradient, bool accumulate)
{
    // Default version: pack (input, target) into a single vector, call the
    // bpropUpdate with the inherited prototype (with an output gradient of
    // 1), then extract the input part of the resulting gradient.
    tmp_input_and_target.resize( input_size + target_size );
    tmp_input_and_target.subVec( 0, input_size ) << input;
    tmp_input_and_target.subVec( input_size, target_size ) << target;
    tmp_input_and_target_gradient.resize( input_size + target_size );
    tmp_costs.resize(1);
    tmp_costs[0] = cost;
    static const Vec one(1, 1);

    bpropUpdate( tmp_input_and_target, tmp_costs,
                 tmp_input_and_target_gradient, one );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == input_size,
                      "Cannot resize input_gradient AND accumulate into it" );
        input_gradient += tmp_input_and_target_gradient.subVec( 0, input_size );
    }
    else
    {
        input_gradient.resize( input_size );
        input_gradient << tmp_input_and_target_gradient.subVec( 0, input_size );
    }
}

void CostModule::bpropUpdate(const Vec& input, const Vec& target, real cost)
{
    bpropUpdate( input, target, cost, tmp_input_gradient );
}

void CostModule::bpropUpdate(const Mat& inputs, const Mat& targets,
                             const Vec& costs)
{
    PLWARNING("In CostModule::bpropUpdate - Using default (possibly "
              "inefficient) version for class %s", classname().c_str());
    bpropUpdate( inputs, targets, costs, tmp_input_gradients );
}

void CostModule::bpropUpdate(const Vec& input_and_target, const Vec& output,
                             Vec& input_and_target_gradient,
                             const Vec& output_gradient,
                             bool accumulate)
{
    inherited::bpropUpdate( input_and_target, output,
                            input_and_target_gradient, output_gradient,
                            accumulate );
}


void CostModule::bbpropUpdate(const Vec& input, const Vec& target, real cost,
                              Vec& input_gradient, Vec& input_diag_hessian,
                              bool accumulate)
{
    // Default version: pack (input, target) into a single vector and call the
    // bbpropUpdate with the inherited prototype, then extract the input parts
    // of the resulting gradient and diagonal Hessian.
    tmp_input_and_target.resize( input_size + target_size );
    tmp_input_and_target.subVec( 0, input_size ) << input;
    tmp_input_and_target.subVec( input_size, target_size ) << target;
    tmp_input_and_target_gradient.resize( input_size + target_size );
    tmp_input_and_target_diag_hessian.resize( input_size + target_size );
    tmp_costs.resize(1);
    tmp_costs[0] = cost;
    static const Vec one(1, 1);
    static const Vec zero(1);

    bbpropUpdate( tmp_input_and_target, tmp_costs,
                  tmp_input_and_target_gradient, one,
                  tmp_input_and_target_diag_hessian, zero,
                  accumulate );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == input_size,
                      "Cannot resize input_gradient AND accumulate into it" );
        PLASSERT_MSG( input_diag_hessian.size() == input_size,
                      "Cannot resize input_diag_hessian AND accumulate into it"
                    );

        input_gradient += tmp_input_and_target_gradient.subVec( 0, input_size );
        input_diag_hessian +=
            tmp_input_and_target_diag_hessian.subVec( 0, input_size );
    }
    else
    {
        input_gradient.resize( input_size );
        input_diag_hessian.resize( input_size );
        input_gradient << tmp_input_and_target_gradient.subVec( 0, input_size );
        input_diag_hessian <<
            tmp_input_and_target_diag_hessian.subVec( 0, input_size );
    }
}

void CostModule::bbpropUpdate(const Vec& input, const Vec& target, real cost)
{
    bbpropUpdate( input, target, cost,
                  tmp_input_gradient, tmp_input_diag_hessian );
}

void CostModule::bbpropUpdate(const Vec& input_and_target, const Vec& output,
                              Vec& input_and_target_gradient,
                              const Vec& output_gradient,
                              Vec& input_and_target_diag_hessian,
                              const Vec& output_diag_hessian,
                              bool accumulate)
{
    inherited::bbpropUpdate( input_and_target, output,
                             input_and_target_gradient,
                             output_gradient,
                             input_and_target_diag_hessian,
                             output_diag_hessian,
                             accumulate );
}

////////////
// forget //
////////////
void CostModule::forget()
{
}

///////////////
// costNames //
///////////////
TVec<string> CostModule::costNames()
{
    return TVec<string>();
}

} // end of namespace PLearn

/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :