// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio and University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************      
 * $Id: Func.cc 9093 2008-06-03 21:05:24Z plearner $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "Func.h"
#include <plearn/math/random.h>
#include <plearn/math/TMat_maths.h>
#include "Var.h"
#include "Var_operators.h"
#include "TimesConstantVariable.h"

namespace PLearn {
using namespace std;

Func::Func()
{}

Func::Func(Function* f) 
    :PP<Function>(f) 
{}

Func::Func(const VarArray& the_inputs, const VarArray& parameters_to_optimize, const VarArray& the_outputs)
    :PP<Function>(new Function(the_inputs, parameters_to_optimize, the_outputs))
{}

Func::Func(const VarArray& the_inputs, const VarArray& the_outputs)
    :PP<Function>(new Function(the_inputs, the_outputs))
{}

/*void Func::bprop(VarArray& parameters_to_optimize)
  {
  ptr->bprop(parameters_to_optimize);
  }
*/

Vec Func::operator()(const Vec& input) const
{ return ptr->operator()(input); }

real Func::operator()(const Vec& input1, const Vec& input2) const
{ return ptr->operator()(input1, input2); }

VarArray Func::operator()(const VarArray& new_inputs) const
{ return ptr->operator()(new_inputs); }

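// A minimal usage sketch (illustrative only; tanh() on a Var is assumed to
// be provided by the Variable headers):
//
//     Var x(2);
//     x->setName("x");
//     Func f(x, tanh(x));         // function mapping x to tanh(x)
//     Vec in(2);
//     in[0] = 0.5; in[1] = -0.5;
//     Vec out = f(in);            // runs fprop along the Var graph
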
Func operator/(Func f, real value)
{ 
    if(fast_exact_is_equal(value, 1.0))
        return f;
    else
    {
        int nouts = f->outputs.size();
        VarArray outs(nouts);
        for(int i=0; i<nouts; i++)
            outs[i] = f->outputs[i]/value;
        return Func(f->inputs, outs);
    }
}
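
// For instance (sketch): "Func g = f / 10.0;" yields a new Func that shares
// f's input Vars and divides each of f's output Vars by the constant 10.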


Function::Function()
    :inputsize(-1), outputsize(-1)
{}


Function::Function(const VarArray& the_inputs, const VarArray& the_outputs)
    :inputs(the_inputs), outputs(the_outputs)
{  
    build_();
}

Function::Function(const VarArray& the_inputs, const VarArray& parameters_to_optimize, const VarArray& the_outputs)
    : inputs(the_inputs), parameters(parameters_to_optimize), outputs(the_outputs)
{
    build_();
}

/*void Function::bprop(VarArray& parameters_to_optimize)
  {
  //bproppath = propagationPath(inputs, parameters_to_optimize,outputs);
  }
*/

PLEARN_IMPLEMENT_OBJECT(
        Function,
        "Implements a function defined as a graph of Variables.",
        ""
);

////////////////////
// declareOptions //
////////////////////
void Function::declareOptions(OptionList& ol)
{
    declareOption(ol, "inputs", &Function::inputs, OptionBase::buildoption,
                  "The list of input variables of this function");
    declareOption(ol, "parameters", &Function::parameters, OptionBase::buildoption,
                  "The list of parameters to optimize");
    declareOption(ol, "outputs", &Function::outputs, OptionBase::buildoption,
                  "The list of output variables of this function");
  
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

////////////
// build_ //
////////////
void Function::build_()
{
    if(parameters.isEmpty())
        parameters = nonInputSources(inputs, outputs);
  
    inputsize = inputs.nelems();
    outputsize = outputs.nelems(); 
  
    fproppath = propagationPath(inputs, outputs);
    if (fproppath.length()==0) // Handle the degenerate case in which there is no path from
                               // inputs to outputs, but the outputs still depend on parameters
                               // and we want to represent that dependency.
    {
        fproppath = propagationPath(inputs & parameters, outputs);
        bproppath = propagationPath(inputs & parameters, outputs);
    }
    else
        bproppath = propagationPath(inputs, outputs);

    parentspath = propagationPathToParentsOfPath(inputs, outputs);
    recomputeParents();

    //parameters_to_optimize.printNames();
    //cout<<"**************Func::printInfo(inputs, outputs);"<<endl;
    //printInfo(inputs, outputs);
    //cout<<"**************Func::printInfo(parameters_to_optimize, outputs);"<<endl;
    //printInfo(parameters_to_optimize,outputs);
    //displayVarGraph(fproppath,true, 333, "ffpp", false);
    //displayVarGraph(bproppath,true, 333, "fbpp", false);
    
    
    // Let's see if getting everything in a single chunk of memory will improve efficiency...
    // Hmm, doesn't seem to.
    /*
      VarArray criticalvars = the_inputs & fproppath;
      int n = criticalvars.nelems();
      Vec data(2*n);
      criticalvars.makeSharedValue(data);
      criticalvars.makeSharedGradient(data,n);
    */
}

void Function::build()
{
    inherited::build();
    build_();
}

void Function::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(inputs, copies);
    deepCopyField(outputs, copies);
    deepCopyField(fproppath, copies);
    deepCopyField(bproppath, copies);
    deepCopyField(parentspath, copies);
    deepCopyField(df, copies);
    deepCopyField(parameters, copies);
}

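// The fprop variants below all follow the same idiom: load concrete values
// into the input Vars (operator<<), propagate forward along the precomputed
// fproppath, then read the output Vars back into 'out' (operator>>).
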
void Function::fprop(const Vec& in, const Vec& out) const
{
    inputs << in;
    fproppath.fprop();
    outputs >> out;
}

void Function::fprop(const Array<Vec>& in, const Array<Vec>& out) const
{
    inputs << in;
    fproppath.fprop();
    outputs >> out;
}

void Function::sizefprop(const Vec& in, const Vec& out) const
{
    inputs << in;
    fproppath.sizefprop();
    outputs >> out;
}

void Function::sizefprop(const Array<Vec>& in, const Array<Vec>& out) const
{
    inputs << in;
    fproppath.sizefprop();
    outputs >> out;
}

real Function::operator()(const Vec& input1, const Vec& input2) const
{
    if(inputs.size()!=2 || outputsize!=1)
        PLERROR("You can only call real Function::operator()(const Vec& input1, const Vec& input2) for a function that has 2 input Vars and a single scalar output Var"); 
    inputs[0]->copyFrom(input1);
    inputs[1]->copyFrom(input2);
    fproppath.fprop();
    return outputs[0]->value[0];
}

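// Sketch of the intended call pattern for the two-Vec overload above (names
// are hypothetical; sumsquare() and operator- on Vars are assumed available
// from the Variable headers):
//
//     Var a(n), b(n);                        // n: input length
//     Func dist(a & b, sumsquare(a - b));    // 2 inputs, 1 scalar output
//     real d = dist(v1, v2);                 // v1, v2: Vecs of length n
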
void Function::fbprop(const Vec& in, const Vec& out, const Vec& input_gradient, const Vec& output_gradient)
{
    inputs << in;
    inputs.clearGradient();
    fproppath.clearGradient();
    outputs.copyGradientFrom(output_gradient);
    fproppath.fbprop();
    outputs >> out;
    inputs.copyGradientTo(input_gradient);

#ifdef BOUNDCHECK
    if (out.hasMissing())
        PLERROR("Function::fbprop: detected MISSING_VALUE in function output!");
  
    //static bool displayvargraph=false;
    //if (displayvargraph)
    //  displayVarGraph(outputs,true);
#endif
}

void Function::fbprop(const Array<Vec>& in, const Array<Vec>& out, const Array<Vec>& input_gradient, const Array<Vec>& output_gradient)
{
    inputs << in;
    inputs.clearGradient();
    fproppath.clearGradient();
    outputs.copyGradientFrom(output_gradient);
    fproppath.fbprop();
    outputs >> out;
    inputs.copyGradientTo(input_gradient);

#ifdef BOUNDCHECK
    if (out.hasMissing())
        PLERROR("Function::fbprop: detected MISSING_VALUE in function output!");
#endif
}

void Function::sizefbprop(const Vec& in, const Vec& out, const Vec& input_gradient, const Vec& output_gradient)
{
    inputs << in;
    inputs.clearGradient();
    fproppath.clearGradient();
    outputs.copyGradientFrom(output_gradient);
    fproppath.sizefbprop();
    outputs >> out;
    inputs.copyGradientTo(input_gradient);

#ifdef BOUNDCHECK
    if (out.hasMissing())
        PLERROR("Function::sizefbprop: detected MISSING_VALUE in function output!");
  
    //static bool displayvargraph=false;
    //if (displayvargraph)
    //  displayVarGraph(outputs,true);
#endif
}

void Function::sizefbprop(const Array<Vec>& in, const Array<Vec>& out, const Array<Vec>& input_gradient, const Array<Vec>& output_gradient)
{
    inputs << in;
    inputs.clearGradient();
    fproppath.clearGradient();
    outputs.copyGradientFrom(output_gradient);
    fproppath.sizefbprop();
    outputs >> out;
    inputs.copyGradientTo(input_gradient);

#ifdef BOUNDCHECK
    if (out.hasMissing())
        PLERROR("Function::sizefbprop: detected MISSING_VALUE in function output!");
#endif
}

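// fbbprop computes the output, the gradient and the Hessian in one call.
// The Hessian is filled row by row: for each component i of the symbolic
// gradient function df, a one-hot gradient is backpropagated through df's
// graph, so row i receives d(df_i)/d(inputs), i.e. the second derivatives
// of the scalar output.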
void Function::fbbprop(const Vec& in, const Vec& output, const Vec& gradient, const Mat& hessian)
{
    if(df==0)
        df = differentiate();

    inputs << in; // inputs and df->inputs are supposed to be the same...
    fproppath.fprop();
    outputs >> output;
    df->fproppath.fprop();
    df->outputs >> gradient;

    df->outputs.clearGradient();
    int pos = 0;
    for(int varnum=0; varnum<df->outputs.size(); varnum++)
    {
        Var& outputvar = df->outputs[varnum];
        for(int i=0; i<outputvar->nelems(); i++)
        {
            df->inputs.clearGradient();
            df->bproppath.clearGradient();
            outputvar->gradient[i] = 1.0;
            df->bproppath.bprop();
            Vec hessian_row = hessian(pos++);
            df->inputs.copyGradientTo(hessian_row);
            outputvar->gradient[i] = 0.0;
        }
    }
}

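// fbbpropAcc is the accumulating variant of fbbprop: it adds into output,
// gradient and hessian instead of overwriting them.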
void Function::fbbpropAcc(const Vec& in, const Vec& output, const Vec& gradient, const Mat& hessian)
{
    if(df==0)
        df = differentiate();

    inputs << in; // inputs and df->inputs are supposed to be the same...
    fproppath.fprop();
    outputs.accumulateTo(output);
    df->fproppath.fprop();
    df->outputs.accumulateTo(gradient);

    df->outputs.clearGradient();
    int pos = 0;
    for(int varnum=0; varnum<df->outputs.size(); varnum++)
    {
        Var& outputvar = df->outputs[varnum];
        for(int i=0; i<outputvar->nelems(); i++)
        {
            df->inputs.clearGradient();
            df->bproppath.clearGradient();
            outputvar->gradient[i] = 1.0;
            df->bproppath.bprop();
            Vec hessian_row = hessian(pos++);
            df->inputs.accumulateGradientTo(hessian_row);
            outputvar->gradient[i] = 0.0;
        }
    }
}

00377 
00378 void Function::rfprop(const Vec& in, const Vec& out, const Vec& input_rvalue, const Vec& output_rvalue, bool do_fprop)
00379 {
00380     if (do_fprop) fprop(in,out);
00381   
00382     inputs.copyRValueFrom(input_rvalue);  
00383     fproppath.rfprop();
00384     outputs.copyRValueTo(output_rvalue);
00385 }
00386 
00387 void Function::recomputeParents()
00388 { parentspath.fprop(); }
00389 
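// differentiate() builds (and caches in this->df) a Func mapping the same
// inputs to the symbolic gradient of the single output with respect to
// those inputs, obtained by symbolic backpropagation along fproppath.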
Func Function::differentiate()
{
    if (outputs.size()>1)
        PLERROR("In Function::differentiate cannot differentiate function with more than one output variable");
    Var output = outputs[0];
    if(df==0)
    {
        output->g = Var(1,"output->g"); 
        output->g = 1.0; // fill gradient
        fproppath.symbolicBprop();
        // Give the symbolic gradient vars reasonable names
        for(int i=0; i<fproppath.size(); i++)
        {
            if(!fproppath[i]->g.isNull()) // only name gradient vars that were actually created
            {
                string name = "gr_" + fproppath[i]->getName();
                fproppath[i]->g->setName(name);
            }
        }
        for(int i=0; i<inputs.size(); i++)
        {
            if(inputs[i]->g.isNull()) // must create it, even though it will remain 0
                inputs[i]->g = Var(inputs[i]->length(), inputs[i]->width());
            string name = "gr_" + inputs[i]->getName();
            inputs[i]->g->setName(name);
        }
        VarArray dinputs = inputs.symbolicGradient();
        // Sanity check:
        if(dinputs.nelems() != inputs.nelems())
            PLERROR("Problem in Function::differentiate(), please send a bug report to vincentp@iro.umontreal.ca");

        // Debug traces left over from development (disabled: they assume at
        // least two inputs and would crash on single-input functions):
        //cerr << "i0: " << inputs[0]->classname() << endl;
        //cerr << "i1: " << inputs[1]->classname() << endl;
        //cerr << "di0: " << dinputs[0]->classname() << endl;
        //cerr << "di1: " << dinputs[1]->classname() << endl;
        dinputs.resizeRValue();
        //cerr << "di0 = " << dinputs[0]->rvaluedata << endl;
        df = Func(inputs, dinputs);
        df->fproppath = propagationPath(fproppath.parents() & (VarArray)output->g, dinputs);
        fproppath.clearSymbolicGradient();
    }
    return df;
}
00433 
00434 Vec Function::operator()(const Vec& input) const
00435 { 
00436     Vec output(outputsize);
00437     fprop(input,output); 
00438     return output;
00439 }
00440 
// New version that uses the deepCopy system.

VarArray Function::operator()(const VarArray& new_inputs) const
{
    CopiesMap copies;

    // make sure the clones of the old inputs are the new inputs
    for(int i=0; i<inputs.size(); i++)
    {
        if(new_inputs[i]->length()!=inputs[i]->length() || new_inputs[i]->width()!=inputs[i]->width())
            PLERROR("In Function::operator()(const VarArray& new_inputs) dimensions of variables in new_inputs and inputs do not match");
        copies[(Variable*)inputs[i]] = (Variable*)new_inputs[i];
        if (!new_inputs[i]->nameIsSet() && inputs[i]->nameIsSet())
            new_inputs[i]->setName(inputs[i]->getName());
    }

    // Make sure that only the vars on the direct path from inputs to outputs
    // get cloned; the clones keep the same parents as the originals, so that
    // gradients can be accumulated in these originals and then
    // backpropagated to shared sources.
    VarArray parofpath = nonInputParentsOfPath(inputs, outputs);
    for(int i=0; i<parofpath.size(); i++)
        copies[(Variable*)parofpath[i]] = (Variable*)parofpath[i];

    // do the deep copying
    VarArray new_outputs = outputs;
    new_outputs.makeDeepCopyFromShallowCopy(copies);
        
    return new_outputs;
}

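// Typical use (sketch, with hypothetical names): applying an existing Func
// to fresh input Vars builds a second graph that reuses the original
// parameter Vars, so gradients from both graphs accumulate into the same
// shared parameters:
//
//     VarArray outputs2 = f(inputs2);   // weight sharing with f's graph
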
// Old version that uses the old clone system
/*
  VarArray Function::operator()(const VarArray& new_inputs) const
  {
  for(int i=0; i<inputs.size(); i++)
  {
  if(new_inputs[i]->length()!=inputs[i]->length() || new_inputs[i]->width()!=inputs[i]->width())
  PLERROR("In Function::operator()(const VarArray& new_inputs) dimensions of variables in new_inputs and inputs do not match");
  inputs[i]->clone_ = new_inputs[i];
  }

  VarArray clones(fproppath.size());
  for(int i=0; i<fproppath.size(); i++)
  clones[i] = fproppath[i]->clone();

  VarArray new_outputs(outputs.size());
  for(int i=0; i<outputs.size(); i++)
  new_outputs[i] = outputs[i]->clone();

  inputs.clearClone();
  fproppath.clearClone();
  outputs.clearClone();
        
  return new_outputs;
  }
*/

///////////////////
// verifyHessian //
///////////////////
void Function::verifyHessian(const Vec& input, real step)
{
    // Job for Charles...
    // Note: compiling with the -DUSEDOUBLE option in Makefile_option avoids
    // some numerical approximation problems, and thus allows smaller step
    // values.
    if(outputsize!=1)
        PLERROR("In Function::verifyHessian(...) Can verify hessian only for output of size 1");
    real out1,out2,out3,out4;
    real doublestep = 2*step;
    Vec output(1);
    Vec gradient(inputsize);
    Mat hessian(inputsize,inputsize);
    fbbprop(input, output, gradient, hessian);
    cerr << "** Verifying hessian computation **" << endl;
    cerr << "Input:                " << input;
    cerr << "Output:               " << output;
    cerr << "Computed  hessian:    " << hessian;    
    // Now estimating the hessian by finite difference:
    //
    // f(x1+dx1,x2+dx2)-f(x1-dx1,x2+dx2)-f(x1+dx1,x2-dx2)+f(x1-dx1,x2-dx2)
    // ------------------------------------------------------------------
    //                    2 * dx1 * 2 * dx2
    //
    Vec newinput1 = input.copy();
    Vec newinput2 = input.copy();
    Vec newinput3 = input.copy();
    Vec newinput4 = input.copy();
    Mat finitediffhessian(inputsize,inputsize);
    Mat rel(inputsize,inputsize);
    double h,f;
    for(int i=0; i<inputsize; i++)
    {
        for(int j=0; j<inputsize; j++)
        {
            newinput1[i] = newinput1[i]-step;
            newinput1[j] = newinput1[j]-step;
            newinput2[i] = newinput2[i]+step;
            newinput2[j] = newinput2[j]-step;
            newinput3[i] = newinput3[i]-step;
            newinput3[j] = newinput3[j]+step;
            newinput4[i] = newinput4[i]+step;
            newinput4[j] = newinput4[j]+step;
            fprop(newinput1,output);
            out1 = output[0];
            fprop(newinput2,output);
            out2 = output[0];
            fprop(newinput3,output);
            out3 = output[0];
            fprop(newinput4,output);
            out4 = output[0];
            finitediffhessian(i,j) = ((out4-out3)/doublestep-(out2-out1)/doublestep)/doublestep;
            newinput1[i] = input[i];
            newinput1[j] = input[j];
            newinput2[i] = input[i];
            newinput2[j] = input[j];
            newinput3[i] = input[i];
            newinput3[j] = input[j];
            newinput4[i] = input[i];
            newinput4[j] = input[j];
        }
    }  
    cerr << "Estimated hessian:   " << finitediffhessian;
    cerr << "-------------------" << endl;
    for (int i=0; i<inputsize; i++)
    {
        for(int j=0; j<inputsize; j++)
        {
            h = hessian(i,j);
            f = finitediffhessian(i,j);
            rel(i,j) = 2*fabs(h-f)/(fabs(h)+fabs(f));
        }    
    }
    cerr << "relative difference: " << rel << endl;
    cerr << "-------------------" << endl;
    cerr << "max relative difference: " << max(rel) << endl;
}


////////////////////
// verifyGradient //
////////////////////
void Function::verifyGradient(const Vec& input, real step, int which_component)
{
    if(outputsize!=1)
        PLWARNING("In Function::verifyGradient(...) Will verify gradient only for output component %d", which_component);
    Vec output(outputsize);
    Vec output_gradient(outputsize);
    output_gradient[which_component]=1.0;
    Vec gradient(inputsize);
    fbprop(input, output, gradient,output_gradient);
    perr << "** Verifying gradient computation **" << endl;
    perr << "Input:                " << input << endl;
    perr << "Output["<<which_component<<"]:            " << output[which_component] << endl;
    perr << "Computed  gradient:   " << gradient << endl;
    //displayFunction(this,true);
    // Now computing the gradient by finite difference
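    // (central difference, where e_i is the i-th basis vector):
    //
    //     df/dx_i  ~=  ( f(x + step*e_i) - f(x - step*e_i) ) / (2*step)
    //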
    Vec newinput = input.copy();
    Vec finitediffgradient(inputsize);
    double doublestep = step+step;
    for(int i=0; i<inputsize; i++)
    {
        real in = input[i];
        newinput[i] = in+step;
        fprop(newinput,output);
        real out1 = output[which_component];
        newinput[i] = in-step;
        fprop(newinput,output);
        real out2 = output[which_component];
        finitediffgradient[i] = (out1-out2)/doublestep;
        newinput[i] = in; // restore the original value
    }
    // copy the original input into the VarArray
    fprop(newinput,output);
    perr << "Estimated gradient:   " << finitediffgradient << endl;
    perr << "-------------------" << endl;
  
    perr << "relative difference: ";
    // 'Safe' relative difference, that does not display a 'nan' when both
    // the computed and estimated gradients are zero.
    Vec num = apply(gradient - finitediffgradient,FABS);
    Vec denom = real(0.5)*apply(gradient + finitediffgradient,FABS);
    for (int i = 0; i < num.length(); i++)
        if (!fast_exact_is_equal(num[i], 0))
            num[i] /= denom[i];
    perr << num << endl;
    //    apply(gradient - finitediffgradient,(tRealFunc)fabs)/(0.5*apply(gradient + finitediffgradient,(tRealFunc)fabs));
    perr << "-------------------" << endl;
    perr << "max relative difference: ";
    // As above, this is a 'safe' relative difference.
    // TODO Question: are we re-doing the same computations as above?
    num = apply(gradient - finitediffgradient,(tRealFunc)FABS);
    denom = real(0.5)*apply(gradient + finitediffgradient,(tRealFunc)FABS);
    for (int i = 0; i < num.length(); i++)
        if (!fast_exact_is_equal(num[i], 0))
            num[i] /= denom[i];
    int pos = argmax(num);
    perr << max(num) << " (at position " << pos << "/" << num.length()
         << ", computed = " << gradient[pos] << " and estimated = "
         << finitediffgradient[pos] << ")" << endl;
    real norm_gradient = norm(gradient);
    real norm_finitediffgradient = norm(finitediffgradient);
    real cos_angle = fast_exact_is_equal(norm_gradient*norm_finitediffgradient,
                                         0)
        ? MISSING_VALUE
        : dot(gradient,finitediffgradient) /
          (norm_gradient*norm_finitediffgradient);
    if (cos_angle > 1)
        cos_angle = 1;      // Numerical imprecision can lead to such a situation.
    perr << "cos(angle) : " << cos_angle << endl;
    perr << "angle : " << ( is_missing(cos_angle) ? MISSING_VALUE
                                                  : acos(cos_angle) ) << endl;
}

void Function::verifyGradient(real minval, real maxval, real step, int which_component)
{
    Vec input(inputsize);
    fill_random_uniform(input,minval, maxval);
    verifyGradient(input, step, which_component);
}

void Function::verifyGradient(real step, int which_component)
{
    Vec input(inputsize);
    inputs >> input;
    verifyGradient(input, step, which_component);
}

////////////////////////////
// verifySymbolicGradient //
////////////////////////////
void Function::verifySymbolicGradient(const Vec& in)
{
    if(in.length()!=inputsize)
        PLERROR("In Function::verifySymbolicGradient(const Vec& in) 'in' does not have the size that this function expects");
    Vec out(outputsize);
    Vec output_gradient(outputsize,1.0);
    Vec gradient1(inputsize);
    fbprop(in,out,gradient1,output_gradient);
    cout << "Bprop computed gradient: " << gradient1 << endl; 
    //cout << "Display f proppath" << endl;
    //displayFunction(this, true, false);
  
    Func df = differentiate();
    //cout << "Display df proppath" << endl;
    Vec gradient2 = df(in);
    //displayFunction(df, true, false);
    cout << "Symbolically computed gradient: " << gradient2 << endl; 
}

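// verifyrfprop checks the R-operator implementation: seeding the R-values
// of df's inputs with the gradient g and running forward R-propagation
// yields the Hessian-vector product H*g, which is compared against the
// explicit hessian from fbbprop multiplied by g.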
void Function::verifyrfprop(const Vec& in, real step)
{
    // Developed to make sure that the rfprop code is correct.
  
    Vec gradient(inputsize);
    Vec rfpropRgradient(inputsize);
    Vec fbbRgradient(inputsize);
    Mat hessian(inputsize,inputsize);
    Vec rel(inputsize);
    Vec out(outputsize);
    real b,r;
    
    if(df==0)
        df = differentiate();
  
    fbbprop(in, out, gradient, hessian);
    fbbRgradient = transposeProduct(hessian, gradient); 

    df->inputs.copyRValueFrom(gradient);
    df->fproppath.rfprop();
    df->outputs.copyRValueTo(rfpropRgradient);
  
    for (int i=0; i<inputsize; i++)
    {
        b = fbbRgradient[i];
        r = rfpropRgradient[i];
        if (fast_exact_is_equal(b, 0) && fast_exact_is_equal(r, 0))
            rel[i] = 0.0;
        else rel[i] = fabs(b-r)/(fabs(b)+fabs(r));
    }    
    cerr << "max relative difference of H*g between rfprop and fbbprop: " << max(rel) << endl;
    //cerr << "max & min of rfprop rgradient: " << max(rfpropRgradient) << " " << min(rfpropRgradient) << endl;
    //cerr << "max & min of fbb rgradient: " << max(fbbRgradient) << " " << min(fbbRgradient) << endl;
}

template <>
void deepCopyField(Func& field, CopiesMap& copies)
{
    if (field)
        field = static_cast<Function*>(field->deepCopy(copies));
}


} // end of namespace PLearn

/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :