// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: SumOfVariable.cc 8853 2008-04-21 20:55:06Z tihocan $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "SumOfVariable.h"
#include <plearn/display/DisplayUtils.h>

#if USING_MPI
#include <plearn/sys/PLMPI.h>
#endif

namespace PLearn {
using namespace std;


PLEARN_IMPLEMENT_OBJECT(
    SumOfVariable,
    "Sums the value of a Function evaluated on each row of a VMatrix",
    "SumOfVariable computes the sum of the value of a Func evaluated on each row\n"
    "of a VMat. This summation is not necessarily constrained to be over all\n"
    "the rows: each fprop computes the sum over 'nsamples' rows of the associated\n"
    "VMatrix. This Variable is used within the implementation of NNet to create\n"
    "the optimization criterion over the training set (which corresponds here to\n"
    "the VMatrix we are summing over).\n");
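
// A minimal usage sketch (illustrative only, not part of the original file),
// assuming the usual PLearn Var/Func/VMat helpers: in practice this Variable
// is typically created through the sumOf(...) helper mentioned in the error
// message of build_() below, e.g.
//
//     VMat trainset = ...;                 // rows hold (input, target, weight)
//     Var input(trainset->inputsize());
//     Var target(trainset->targetsize());
//     Var cost = ...;                      // cost expression of input, target and parameters
//     Func f(input & target, cost);
//     Var total_cost = sumOf(trainset, f, trainset->length());
//
// Each fprop() on total_cost then sums f's output over 'nsamples' rows of trainset.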

///////////////////
// SumOfVariable //
///////////////////
SumOfVariable::SumOfVariable():
    nsamples(0),
    curpos(0),
    loop(false),
    do_sizeprop(false)
{}

SumOfVariable::SumOfVariable(VMat the_distr, Func the_f, int the_nsamples,
                             bool the_do_sizeprop, bool call_build_):
    inherited(nonInputParentsOfPath(the_f->inputs, the_f->outputs),
              the_f->outputs[0]->length(),
              the_f->outputs[0]->width(),
              call_build_),
    distr(the_distr),
    f(the_f),
    nsamples(the_nsamples),
    curpos(0),
    loop(false),
    input_value(the_distr->width()),
    input_gradient(the_distr->width()),
    output_value(the_f->outputs[0]->size()),
    do_sizeprop(the_do_sizeprop)
{
    if (call_build_)
        build_();
}

///////////
// build //
///////////
void SumOfVariable::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void SumOfVariable::build_()
{
    if (f && distr)
    {
        varray = nonInputParentsOfPath(f->inputs, f->outputs);
        // We need to rebuild the parent class since a build option changed.
        inherited::build();

        input_value.resize(distr->inputsize() + distr->targetsize() + distr->weightsize());
        input_gradient.resize(distr->inputsize() + distr->targetsize() + distr->weightsize());
        if(f->outputs.size() != 1)
            PLERROR("In SumOfVariable::build_: function must have a single "
                    "variable output (maybe you can vconcat the vars into a "
                    "single one prior to calling sumOf, if this is really "
                    "what you want)");
        if(nsamples == -1)
            nsamples = distr->length();
        f->inputs.setDontBpropHere(true);
    }
}

void SumOfVariable::declareOptions(OptionList& ol)
{
    declareOption(ol, "distr", &SumOfVariable::distr, OptionBase::buildoption,
                  "VMatrix over which the summation should be done.");
    declareOption(ol, "f", &SumOfVariable::f, OptionBase::buildoption,
                  "Function that is passed the rows of the VMat as input.");
    declareOption(ol, "nsamples", &SumOfVariable::nsamples, OptionBase::buildoption,
                  "How many rows of the VMatrix should be summed at a time when\n"
                  "performing an fprop/bprop on the Variable. If -1 (the default)\n"
                  "the length of 'distr' is assumed, i.e. the sum is done over\n"
                  "all rows of the matrix.");
    declareOption(ol, "curpos", &SumOfVariable::curpos, OptionBase::buildoption,
                  "Current position (row) in the VMatrix we are summing over.");
    declareOption(ol, "loop", &SumOfVariable::loop, OptionBase::buildoption,
                  "If true, every propagation operation will, before returning,\n"
                  "set curpos back to the value it had when entering the call,\n"
                  "leaving curpos unchanged. This corresponds to propagation\n"
                  "operations always summing over the same nsamples rows\n"
                  "(curpos, ..., curpos+nsamples-1). If loop is false, every\n"
                  "propagation call advances curpos by nsamples, so a subsequent\n"
                  "call sums over the *next* nsamples rows (which are the same\n"
                  "rows only if nsamples == distr.length()).");
    inherited::declareOptions(ol);
}
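
// A worked example of the 'loop' option (illustrative numbers): with
// distr->length() == 100, nsamples == 10 and curpos == 0, one fprop() sums
// rows 0..9. If loop is false, curpos is left at 10 and the next fprop()
// sums rows 10..19; if loop is true, curpos is restored to 0 and every
// fprop() sums the same rows 0..9.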

void SumOfVariable::recomputeSize(int& l, int& w) const
{
    if (f && f->outputs.size()) {
        l = f->outputs[0]->length();
        w = f->outputs[0]->width();
    } else
        l = w = 0;
}


void SumOfVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(distr, copies);
    deepCopyField(f, copies);
}


void SumOfVariable::fprop()
{
    int orig_curpos = curpos;

    f->recomputeParents();

    if(nsamples==1)
    {
        // getRow() needs a buffer as wide as a full row; afterwards shrink
        // it back to the (input, target, weight) part expected by f.
        input_value.resize(distr->width());
        distr->getRow(curpos, input_value);
        input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
        if(do_sizeprop) f->sizefprop(input_value, value);
        else f->fprop(input_value, value);
        if(++curpos == distr->length())
            curpos = 0;
    }
    else
    {
        value.clear();
#if USING_MPI
        if (nsamples > distr->length())
            PLERROR("In SumOfVariable::fprop, the case where nsamples is greater than distr->length is not supported in parallel computation");
        // Split the nsamples rows across the MPI processes; the last rank
        // absorbs the remainder when nsamples is not a multiple of the
        // number of processes.
        int nb_sample = nsamples/PLMPI::size;
        int start_pos = PLMPI::rank * nb_sample;
        int end_pos = (PLMPI::rank==PLMPI::size-1) ? nsamples : start_pos + nb_sample;
        Vec dummy_value(value.length());
        for(int i=start_pos; i<end_pos; i++)
        {
            input_value.resize(distr->width());
            distr->getRow(i, input_value);
            input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
            if(do_sizeprop) f->sizefprop(input_value, output_value);
            else f->fprop(input_value, output_value);
            dummy_value += output_value;
        }
        // Sum the per-process partial sums into 'value' on every process.
        MPI_Allreduce(dummy_value.data(), value.data(), value.length(), PLMPI_REAL, MPI_SUM, MPI_COMM_WORLD);
#else
        for(int i=0; i<nsamples; i++)
        {
            input_value.resize(distr->width());
            distr->getRow(curpos, input_value);
            input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
            if(do_sizeprop) f->sizefprop(input_value, output_value);
            else f->fprop(input_value, output_value);
            value += output_value;
            if(++curpos == distr->length())
                curpos = 0;
        }
#endif
    }

    // If 'loop' is set, restore curpos so the next call sums the same rows.
    if(loop)
        curpos = orig_curpos;
}
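
// A worked example of the row partitioning in the MPI branch above
// (illustrative numbers, not from the original source): with nsamples == 100
// and PLMPI::size == 8, nb_sample == 100/8 == 12, so ranks 0..6 each process
// 12 rows (rows 0..83) while the last rank processes rows 84..99, absorbing
// the remainder; MPI_Allreduce then gives every process the complete sum.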

void SumOfVariable::bprop()
{ fbprop(); }


void SumOfVariable::fbprop()
{
    f->recomputeParents();
    int orig_curpos = curpos;

    if(nsamples==1)
    {
        input_value.resize(distr->width());
        distr->getRow(curpos, input_value);
        input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
        //displayFunction(f, true, false, 250);
        if(do_sizeprop) f->sizefbprop(input_value, value, input_gradient, gradient);
        else f->fbprop(input_value, value, input_gradient, gradient);
        //displayFunction(f, true, false, 250);
        if(++curpos == distr->length())
            curpos = 0;
    }
    else
    {
        value.clear();
#if USING_MPI
        if (nsamples > distr->length())
            PLERROR("In SumOfVariable::fbprop, the case where nsamples is greater than distr->length is not supported in parallel computation");
        int nb_sample = nsamples/PLMPI::size;
        int start_pos = PLMPI::rank * nb_sample;
        int end_pos = (PLMPI::rank==PLMPI::size-1) ? nsamples : start_pos + nb_sample;
        Vec dummy_value(value.length());
        for(int i=start_pos; i<end_pos; i++)
        {
            input_value.resize(distr->width());
            distr->getRow(i, input_value);
            input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
            if(do_sizeprop) f->sizefbprop(input_value, output_value, input_gradient, gradient);
            else f->fbprop(input_value, output_value, input_gradient, gradient);
            dummy_value += output_value;
        }
        MPI_Allreduce(dummy_value.data(), value.data(), value.length(), PLMPI_REAL, MPI_SUM, MPI_COMM_WORLD);
        // Combine the parameter gradients computed by each process: reduce
        // them on rank 0, then broadcast the summed gradients back to all
        // processes (an all-reduce done in two steps).
        VarArray params = f->parameters;
        for (int i=0; i<params.size(); i++)
        {
            Vec buffer(params[i]->size());
            MPI_Reduce(params[i]->gradientdata, buffer.data(), buffer.length(), PLMPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD);
            buffer >> params[i]->gradient;
            MPI_Bcast(params[i]->gradientdata, buffer.length(), PLMPI_REAL, 0, MPI_COMM_WORLD);
        }
#else
        for(int i=0; i<nsamples; i++)
        {
            input_value.resize(distr->width());
            distr->getRow(curpos, input_value);
            input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
            static bool display_fn=false;
            if (display_fn)
                displayFunction(f, true, false, 250);
            if(do_sizeprop) f->sizefbprop(input_value, output_value, input_gradient, gradient);
            else f->fbprop(input_value, output_value, input_gradient, gradient);
            value += output_value;
            if(++curpos == distr->length())
                curpos = 0;
        }
#endif
    }

    if(loop)
        curpos = orig_curpos;
}
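
// Note on bprop()/fbprop() above: the gradient of a sum is the sum of the
// per-row gradients, so each per-row fbprop() call accumulates its
// contribution into the gradients of f's parameters; bprop() simply
// delegates to fbprop() because computing those per-row gradients requires
// redoing the forward pass on every row anyway.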

void SumOfVariable::symbolicBprop()
{
    /*
    // f is a function of its inputs, what we want is a function of the
    // parameters of f (which are in the inputs field of this SumOfVariable)
    VarArray& params = varray;
    int nparams = params.size();
    f->bproppath.symbolicBprop();

    VarArray dparams(nparams);
    for(int i=0; i<nparams; i++)
        dparams[i] = params[i]->g;

    Var dparams_concat = new ConcatElementsVariable(dparams);
    Var dparams_sum = new SumOfVariable(distr, Func(params,dparams_concat), nsamples);

    for(int i=0; i<nparams; i++)
        params[i]->g += dparams_sum.sub(...)
    */
}


void SumOfVariable::rfprop()
{
    int orig_curpos = curpos;

    if (rValue.length()==0) resizeRValue();
    // TODO... (we will need a rfprop() in Func)

//     f->recomputeParents();
//
//     if(nsamples==1)
//     {
//         distr->getRow(curpos, input_value);
//         f->fprop(input_value, value);
//         if(++curpos == distr->length())
//             curpos = 0;
//     }
//     else
//     {
//         value.clear();
// #if USING_MPI
//         if (nsamples > distr->length())
//             PLERROR("In SumOfVariable::rfprop, the case where nsamples is greater than distr->length is not supported in parallel computation");
//         int nb_sample = nsamples/PLMPI::size;
//         int start_pos = PLMPI::rank * nb_sample;
//         int end_pos = (PLMPI::rank==PLMPI::size-1) ? nsamples : start_pos + nb_sample;
//         Vec dummy_value(value.length());
//         for(int i=start_pos; i<end_pos; i++)
//         {
//             distr->getRow(i, input_value);
//             f->fprop(input_value, output_value);
//             dummy_value += output_value;
//         }
//         MPI_Allreduce(dummy_value.data(), value.data(), value.length(), PLMPI_REAL, MPI_SUM, MPI_COMM_WORLD);
// #else
//         for(int i=0; i<nsamples; i++)
//         {
//             distr->getRow(curpos, input_value);
//             f->fprop(input_value, output_value);
//             value += output_value;
//             if(++curpos == distr->length())
//                 curpos = 0;
//         }
// #endif
//     }

    if(loop)
        curpos = orig_curpos;
}

void SumOfVariable::printInfo(bool print_gradient)
{
    Vec input_value(distr->width());
    Vec input_gradient(distr->width());
    Vec output_value(nelems());

    f->recomputeParents();
    value.clear();

    for(int i=0; i<nsamples; i++)
    {
        input_value.resize(distr->width());
        distr->getRow(curpos++, input_value);
        input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
        if (print_gradient)
        {
            if(do_sizeprop) f->sizefbprop(input_value, output_value, input_gradient, gradient);
            else f->fbprop(input_value, output_value, input_gradient, gradient);
        }
        else
        {
            if(do_sizeprop) f->sizefprop(input_value, output_value);
            else f->fprop(input_value, output_value);
        }
        value += output_value;
        if(curpos >= distr->length())
            curpos = 0;
        f->fproppath.printInfo(print_gradient);
    }
    pout << info() << " : " << getName() << " = " << value;
    if (print_gradient) pout << " gradient=" << gradient;
    pout << endl;
}



} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :