PLearn 0.1
DeepReconstructorNet.cc
00001 // -*- C++ -*-
00002 
00003 // DeepReconstructorNet.cc
00004 //
00005 // Copyright (C) 2007 Simon Lemieux, Pascal Vincent
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Simon Lemieux, Pascal Vincent
00036 
00040 #include "DeepReconstructorNet.h"
00041 #include <plearn/display/DisplayUtils.h>
00042 #include <plearn/var/Var_operators.h>
00043 #include <plearn/vmat/ConcatColumnsVMatrix.h>
00044 #include <plearn/var/ConcatColumnsVariable.h>
00045 #include <plearn/io/load_and_save.h>
00046 #include <plearn/io/MatIO.h>
00047 #include <plearn/math/PRandom.h>
00048 #include <plearn/math/VecStatsCollector.h>
00049 
00050 namespace PLearn {
00051 using namespace std;
00052 
00053 PLEARN_IMPLEMENT_OBJECT(
00054    DeepReconstructorNet,
00055    "ONE LINE DESCRIPTION",
00056    "MULTI-LINE \nHELP");
00057 
00058 DeepReconstructorNet::DeepReconstructorNet()
00059     :supervised_nepochs(pair<int,int>(0,0)),
00060      supervised_min_improvement_rate(-10000),
00061      minibatch_size(1)
00062 {
00063 }
00064 
00065 void DeepReconstructorNet::declareOptions(OptionList& ol)
00066 {
00067     // ### Declare all of this object's options here.
00068     // ### For the "flags" of each option, you should typically specify
00069     // ### one of OptionBase::buildoption, OptionBase::learntoption or
00070     // ### OptionBase::tuningoption. If you don't provide one of these three,
00071     // ### this option will be ignored when loading values from a script.
00072     // ### You can also combine flags, for example with OptionBase::nosave:
00073     // ### (OptionBase::buildoption | OptionBase::nosave)
00074 
00075     declareOption(ol, "unsupervised_nepochs", &DeepReconstructorNet::unsupervised_nepochs,
00076                   OptionBase::buildoption,
00077                   "unsupervised_nepochs[k] contains a pair of integers giving the minimum and\n"
00078                   "maximum number of epochs for the training of layer k+1 (taking layer k\n"
00079                   "as input). Thus k=0 corresponds to the training of the first hidden layer.");
00080 
00081     declareOption(ol, "unsupervised_min_improvement_rate", &DeepReconstructorNet::unsupervised_min_improvement_rate,
00082                   OptionBase::buildoption,
00083                   "unsupervised_min_improvement_rate[k] should contain the minimum required relative improvement rate\n"
00084                   "for the training of layer k+1 (taking input from layer k.)");
00085 
00086     declareOption(ol, "supervised_nepochs", &DeepReconstructorNet::supervised_nepochs,
00087                   OptionBase::buildoption,
00088                   "Pair (min,max) giving the minimum and maximum number of epochs for the training of the\n"
                  "supervised output layer (max<=0 means the supervised layer is not trained separately).");
00089 
00090     declareOption(ol, "supervised_min_improvement_rate", &DeepReconstructorNet::supervised_min_improvement_rate,
00091                   OptionBase::buildoption,
00092                   "supervised_min_improvement_rate contains the minimum required relative improvement rate\n"
00093                   "for the training of the supervised layer.");
00094 
00095     declareOption(ol, "layers", &DeepReconstructorNet::layers,
00096                   OptionBase::buildoption,
00097                   "layers[0] is the input variable; the last layer is the final output layer");
00098 
00099     declareOption(ol, "reconstruction_costs", &DeepReconstructorNet::reconstruction_costs,
00100                   OptionBase::buildoption,
00101                   "reconstruction_costs[k] is the reconstruction cost for layers[k]");
00102 
00103     declareOption(ol, "reconstruction_costs_names", &DeepReconstructorNet::reconstruction_costs_names,
00104                   OptionBase::buildoption,
00105                   "The names to be given to each of the elements of a vector cost");
00106 
00107     declareOption(ol, "reconstructed_layers", &DeepReconstructorNet::reconstructed_layers,
00108                   OptionBase::buildoption,
00109                   "reconstructed_layers[k] is the reconstruction of layer k from hidden_for_reconstruction[k]\n"
00110                   "(which corresponds to a version of layer k+1; see further explanation of hidden_for_reconstruction).\n");
00111 
00112     declareOption(ol, "hidden_for_reconstruction", &DeepReconstructorNet::hidden_for_reconstruction,
00113                   OptionBase::buildoption,
00114                   "reconstructed_layers[k] is reconstructed from hidden_for_reconstruction[k]\n"
00115                   "which corresponds to a version of layer k+1.\n"
00116                   "hidden_for_reconstruction[k] can however be different from layers[k+1] \n"
00117                   "since e.g. layers[k+1] may be obtained by transforming a clean input, while \n"
00118                   "hidden_for_reconstruction[k] may be obtained by transforming a corrupted input \n");
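    // Illustrative reading only (not an additional constraint): in a denoising-autoencoder-like
    // setting, layers[k+1] would be computed from the clean layers[k], while
    // hidden_for_reconstruction[k] would be computed from a corrupted version of layers[k],
    // and reconstructed_layers[k] is the decoding of hidden_for_reconstruction[k].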
00119 
00120     declareOption(ol, "reconstruction_optimizers", &DeepReconstructorNet::reconstruction_optimizers,
00121                   OptionBase::buildoption,
00122                   "reconstruction_optimizers[k] is the optimizer used for the unsupervised training of layer k+1;\n"
                  "if empty, reconstruction_optimizer is used for all layers.");
00123 
00124     declareOption(ol, "reconstruction_optimizer", &DeepReconstructorNet::reconstruction_optimizer,
00125                   OptionBase::buildoption,
00126                   "Optimizer used for the unsupervised training of all layers when reconstruction_optimizers is empty.");
00127 
00128     declareOption(ol, "target", &DeepReconstructorNet::target,
00129                   OptionBase::buildoption,
00130                   "The target variable used as input to the supervised costs.");
00131 
00132     declareOption(ol, "supervised_costs", &DeepReconstructorNet::supervised_costs,
00133                   OptionBase::buildoption,
00134                   "The list of supervised cost variables, computed from the last layer and the target.");
00135 
00136     declareOption(ol, "supervised_costvec", &DeepReconstructorNet::supervised_costvec,
00137                   OptionBase::learntoption,
00138                   "Concatenation (hconcat) of the supervised_costs.");
00139 
00140     declareOption(ol, "supervised_costs_names", &DeepReconstructorNet::supervised_costs_names,
00141                   OptionBase::buildoption,
00142                   "The names of the supervised costs, used as the train and test cost names.");
00143 
00144     declareOption(ol, "minibatch_size", &DeepReconstructorNet::minibatch_size,
00145                   OptionBase::buildoption,
00146                   "Number of examples per optimizer update (minibatch).");
00147 
00148     declareOption(ol, "supervised_optimizer", &DeepReconstructorNet::supervised_optimizer,
00149                   OptionBase::buildoption,
00150                   "Optimizer used when training the supervised output layer alone.");
00151 
00152     declareOption(ol, "fine_tuning_optimizer", &DeepReconstructorNet::fine_tuning_optimizer,
00153                   OptionBase::buildoption,
00154                   "Optimizer used for the final supervised fine-tuning of the whole network.");
00155 
00156     declareOption(ol, "group_sizes", &DeepReconstructorNet::group_sizes,
00157                   OptionBase::buildoption,
00158                   "");
00159     
00160     // Now call the parent class' declareOptions
00161     inherited::declareOptions(ol);
00162 }
00163 
00164 void DeepReconstructorNet::declareMethods(RemoteMethodMap& rmm)
00165 {
00166     rmm.inherited(inherited::_getRemoteMethodMap_());
00167 
00168     declareMethod(rmm,
00169                   "getParameterValue",
00170                   &DeepReconstructorNet::getParameterValue,
00171                   (BodyDoc("Returns the matValue of the parameter variable with the given name"),
00172                    ArgDoc("varname", "name of the variable searched for"),
00173                    RetDoc("Returns the value of the parameter as a Mat")));
00174 
00175     declareMethod(rmm,
00176                   "getParameterRow",
00177                   &DeepReconstructorNet::getParameterRow,
00178                   (BodyDoc("Returns the nth row of the matValue of the parameter variable with the given name"),
00179                    ArgDoc("varname", "name of the variable searched for"),
00180                    ArgDoc("n", "row number"),
00181                    RetDoc("Returns the nth row of the value of the parameter as a Vec")));
00182 
00183 
00184 
00185     declareMethod(rmm,
00186                   "listParameterNames",
00187                   &DeepReconstructorNet::listParameterNames,
00188                   (BodyDoc("Returns a list of the names of the parameters"),
00189                    RetDoc("Returns a list of the names of the parameters")));
00190 
00191     declareMethod(rmm,
00192                   "listParameter",
00193                   &DeepReconstructorNet::listParameter,
00194                   (BodyDoc("Returns a list of the parameters"),
00195                    RetDoc("Returns a list of the parameter values, each as a Mat")));
00196 
00197     declareMethod(rmm,
00198                   "computeRepresentations",
00199                   &DeepReconstructorNet::computeRepresentations,
00200                   (BodyDoc("Compute the representation of each hidden layer"),
00201                    ArgDoc("input", "the input"),
00202                    RetDoc("The representations")));
00203 
00204     declareMethod(rmm,
00205                   "computeReconstructions",
00206                   &DeepReconstructorNet::computeReconstructions,
00207                   (BodyDoc("Compute the reconstructions of the input of each hidden layer"),
00208                    ArgDoc("input", "the input"),
00209                    RetDoc("The reconstructions")));
00210 
00211     declareMethod(rmm,
00212                    "getMatValue",
00213                    &DeepReconstructorNet::getMatValue,
00214                    (BodyDoc("Returns the matValue of the given layer"),
00215                     ArgDoc("layer", "no of the layer"),
00216                     RetDoc("the matValue")));
00217 
00218     declareMethod(rmm,
00219                    "setMatValue",
00220                    &DeepReconstructorNet::setMatValue,
00221                    (BodyDoc("Sets the matValue of the given layer"),
00222                     ArgDoc("layer", "no of the layer"),
00223                     ArgDoc("values", "the values")));
00224 
00225     declareMethod(rmm,
00226                    "fpropOneLayer",
00227                    &DeepReconstructorNet::fpropOneLayer,
00228                    (BodyDoc("Performs a forward propagation from the given layer to the next layer"),
00229                     ArgDoc("layer", "no of the layer"),
00230                     RetDoc("the resulting matValue of the next layer")));
00231 
00232 
00233     declareMethod(rmm,
00234                    "reconstructOneLayer",
00235                    &DeepReconstructorNet::reconstructOneLayer,
00236                    (BodyDoc("Reconstructs layer-1 from the current matValue of the given layer"),
00237                     ArgDoc("layer", "no of the layer"),
00238                     RetDoc("the matValue of reconstructed_layers[layer-1]")));
00239 
00240     declareMethod(rmm,
00241                    "computeAndSaveLayerActivationStats",
00242                    &DeepReconstructorNet::computeAndSaveLayerActivationStats,
00243                    (BodyDoc("computeAndSaveLayerActivationStats will compute statistics (univariate and bivariate)\n"
00244                             "of the post-nonlinearity activations of a hidden layer on a given dataset:\n"
00245                             "\n"
00246                             "  - It will compute a matrix of simple statistics for all units of that layer and \n"
00247                             "    save it in filebasename_all_simplestats.pmat \n"
00248                             "  - It will also select a subset of the units made of the first nfirstunits units \n"
00249                             "    and of notherunits randomly selected units among the others.\n"
00250                             "    For this selected subset more extensive statistics are computed and saved:\n"
00251                             "      + a VecStatsCollector collecting univariate histograms and bivariate\n"
00252                             "        covariance will be saved in filebasename_selected_statscol.psave\n"
00253                             "      + a matrix of bivariate histograms will be saved as \n"
00254                             "        filebasename_selected_bihist.pmat \n"
00255                             "        Row i*nselectedunits+j of that matrix will contain the 5*5 bivariate\n"
00256                             "        histogram for the activations of selected_unit_i vs selected_unit_j.\n"
00257                             "\n"
00258                             "which_layer: 1 means first hidden layer, 2, second hidden layer, etc... \n"),
00259                     ArgDoc("dataset", "the data vmatrix to compute activations on"),
00260                     ArgDoc("which_layer", "the layer (1 for first hidden layer)"),
00261                     ArgDoc("filebasename", "basename for generated files"),
00262                     ArgDoc("nfirstunits", "number of first units to select for extensive stats."),
00263                     ArgDoc("notherunits", "number of other units to select for extensive stats.")
00264                     ));
00265 
00266 }
00267 
00268 void DeepReconstructorNet::build_()
00269 {
00270     // ### This method should do the real building of the object,
00271     // ### according to set 'options', in *any* situation.
00272     // ### Typical situations include:
00273     // ###  - Initial building of an object from a few user-specified options
00274     // ###  - Building of a "reloaded" object: i.e. from the complete set of
00275     // ###    all serialised options.
00276     // ###  - Updating or "re-building" of an object after a few "tuning"
00277     // ###    options have been modified.
00278     // ### You should assume that the parent class' build_() has already been
00279     // ### called.
00280     
00281     int nlayers = layers.length();
00282     compute_layer.resize(nlayers-1);
00283     for(int k=0; k<nlayers-1; k++)
00284         compute_layer[k] = Func(layers[k], layers[k+1]);
00285     compute_output = Func(layers[0], layers[nlayers-1]);
00286     nout = layers[nlayers-1]->size();
00287 
00288     output_and_target_to_cost = Func(layers[nlayers-1]&target, supervised_costvec); 
00289 
00290 
00291     if(supervised_costs.isNull())
00292         PLERROR("You must provide a supervised_cost");
00293 
00294     supervised_costvec = hconcat(supervised_costs);
00295 
00296     if(supervised_costs.length()>0)
00297         fullcost += supervised_costs[0];
00298     for(int i=1; i<supervised_costs.length(); i++)
00299         fullcost += supervised_costs[i];
00300     
00301     int n_rec_costs = reconstruction_costs.length();
00302     for(int k=0; k<n_rec_costs; k++)
00303         fullcost += reconstruction_costs[k];
00304     //displayVarGraph(fullcost);
00305     Var input = layers[0];
00306     Func f(input&target, fullcost);
00307     parameters = f->parameters;
00308     outmat.resize(n_rec_costs);
00309 
00310 
00311     // older versions did not specify hidden_for_reconstruction
00312     // if it's not there, let's try to infer it
00313     if( (reconstructed_layers.length()!=0) && (hidden_for_reconstruction.length()==0) ) 
00314     {
00315         int n = reconstructed_layers.length();
00316         for(int k=0; k<n; k++)
00317         {
00318             VarArray proppath = propagationPath(layers[k+1],reconstructed_layers[k]);
00319             if(proppath.length()>0) // great, we found a path from layers[k+1] !
00320                 hidden_for_reconstruction.append(layers[k+1]);
00321             else // ok this is getting much more difficult, let's try to guess
00322             {
00323                 // let's consider the full path from sources to reconstructed_layers[k]
00324                 VarArray fullproppath = propagationPath(reconstructed_layers[k]);
00325                 // look for a variable with same type and dimension as layers[k+1]
00326                 int pos;
00327                 for(pos = fullproppath.length()-2; pos>=0; pos--)
00328                 {
00329                     if( fullproppath[pos]->length()    == layers[k+1]->length() &&
00330                         fullproppath[pos]->width()     == layers[k+1]->width() &&
00331                         fullproppath[pos]->classname() == layers[k+1]->classname() )
00332                         break; // found a matching one!
00333                 }
00334                 if(pos>=0) // found a match at pos, let's use it
00335                 {
00336                     hidden_for_reconstruction.append(fullproppath[pos]);
00337                     perr << "Found match for hidden_for_reconstruction " << k << endl;
00338                     //displayVarGraph(propagationPath(hidden_for_reconstruction[k],reconstructed_layers[k])
00339                     //                ,true, 333, "reconstr");        
00340                 }
00341                 else
00342                 {
00343                     PLERROR("Unable to guess hidden_for_reconstruction variable: no matching variable found.");
00344                 }
00345             }
00346         }
00347     }
00348 
00349     if( reconstructed_layers.length() != hidden_for_reconstruction.length() )
00350         PLERROR("reconstructed_layers and hidden_for_reconstruction should have the same number of elements.");
00351 }
00352 
00353 // ### Nothing to add here, simply calls build_
00354 void DeepReconstructorNet::build()
00355 {
00356     if(random_gen.isNull())
00357         random_gen = new PRandom();
00358     inherited::build();
00359     build_();
00360 }
00361 
00362 void DeepReconstructorNet::initializeParams(bool set_seed)
00363 {
00364     perr << "Initializing parameters..." << endl;
00365     if (set_seed && seed_ != 0)
00366         random_gen->manual_seed(seed_);
00367 
00368     for(int i=0; i<parameters.length(); i++)
00369         dynamic_cast<SourceVariable*>((Variable*)parameters[i])->randomInitialize(random_gen);
00370 }
00371 
00372 
00373 void DeepReconstructorNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00374 {
00375     inherited::makeDeepCopyFromShallowCopy(copies);
00376 
00377     deepCopyField(unsupervised_nepochs, copies);
00378     deepCopyField(unsupervised_min_improvement_rate, copies);
00379     deepCopyField(supervised_nepochs, copies);
00380     deepCopyField(supervised_min_improvement_rate, copies);
00381 
00382     deepCopyField(layers, copies);
00383     deepCopyField(reconstruction_costs, copies);
00384     deepCopyField(reconstructed_layers, copies);
00385     deepCopyField(hidden_for_reconstruction, copies);
00386     deepCopyField(reconstruction_optimizers, copies);
00387     deepCopyField(reconstruction_optimizer, copies);
00388     varDeepCopyField(target, copies);
00389     deepCopyField(supervised_costs, copies);
00390     varDeepCopyField(supervised_costvec, copies);
00391     deepCopyField(supervised_costs_names, copies);
00392     varDeepCopyField(fullcost, copies);    
00393     deepCopyField(parameters,copies);
00394     deepCopyField(supervised_optimizer, copies);
00395     deepCopyField(fine_tuning_optimizer, copies);
00396 
00397     deepCopyField(compute_layer, copies);
00398     deepCopyField(compute_output, copies);
00399     deepCopyField(output_and_target_to_cost, copies);
00400     // deepCopyField(outmat, copies); // deep copying vmatrices, especially if opened in write mode, is probably a bad idea
00401     deepCopyField(group_sizes, copies);
00402 }
00403 
00404 int DeepReconstructorNet::outputsize() const
00405 {
00406     // Compute and return the size of this learner's output (which typically
00407     // may depend on its inputsize(), targetsize() and set options).
00408 
00409     //TODO: return the correct output size here
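00410     // (presumably this should be nout, i.e. layers[layers.length()-1]->size(),
00411     //  which is the size computeOutput resizes its output to)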
00410     return 0;
00411 }
00412 
00413 void DeepReconstructorNet::forget()
00414 {
00418 
00425     inherited::forget();
00426     initializeParams();    
00427 }
00428 
00429 void DeepReconstructorNet::train()
00430 {
00431     // The role of the train method is to bring the learner up to
00432     // stage==nstages, updating train_stats with training costs measured
00433     // on-line in the process.
00434 
00435     // This generic PLearner method does a number of standard stuff useful for
00436     // (almost) any learner, and return 'false' if no training should take
00437     // place. See PLearner.h for more details.
00438     if (!initTrain())
00439         return;
00440 
00441     while(stage<nstages)
00442     {
00443         if(stage<1)
00444         {
00445             PPath outmatfname = expdir/"outmat";
00446 
00447             int nreconstructions = reconstruction_costs.length();
00448             int insize = train_set->inputsize();
00449             VMat inputs = train_set.subMatColumns(0,insize);
00450             VMat targets = train_set.subMatColumns(insize, train_set->targetsize());
00451             VMat dset = inputs;
00452 
00453             bool must_train_supervised_layer = supervised_nepochs.second>0;
00454             
00455             PLearn::save(expdir/"learner.psave", *this);
00456             for(int k=0; k<nreconstructions; k++)
00457             {
00458                 trainHiddenLayer(k, dset);
00459                 PLearn::save(expdir/"learner.psave", *this, PStream::plearn_binary, false);
00460                 // 'if' is a hack to avoid precomputing last hidden layer if not needed
00461                 if(k<nreconstructions-1 ||  must_train_supervised_layer) 
00462                 { 
00463                     int width = layers[k+1].width();
00464                     outmat[k] = new FileVMatrix(outmatfname+tostring(k+1)+".pmat",0,width);
00465                     outmat[k]->defineSizes(width,0);
00466                     buildHiddenLayerOutputs(k, dset, outmat[k]);
00467                     dset = outmat[k];
00468                 }
00469             }
00470 
00471             if(must_train_supervised_layer)
00472             {
00473                 trainSupervisedLayer(dset, targets);
00474                 PLearn::save(expdir/"learner.psave", *this);
00475             }
00476 
00477             for(int k=0; k<reconstruction_costs.length(); k++)
00478               {
00479                 if(outmat[k].isNotNull())
00480                   {
00481                     perr << "Closing outmat " << k+1 << endl;
00482                     outmat[k] = 0;
00483                   }
00484               }
00485             
00486             perr << "\n\n*********************************************" << endl;
00487             perr << "****      Now performing fine tuning     ****" << endl;
00488             perr << "********************************************* \n" << endl;
00489 
00490         }
00491         else
00492         {
00493             perr << "+++ Fine tuning stage " << stage+1 << " **" << endl;
00494             prepareForFineTuning();
00495             fineTuningFor1Epoch();
00496         }
00497         ++stage;
00498         train_stats->finalize(); // finalize statistics for this epoch
00499     }
00500     /*
00501     while(stage<nstages)
00502     {        
00503         // clear statistics of previous epoch
00504         train_stats->forget();
00505 
00506         //... train for 1 stage, and update train_stats,
00507         // using train_set->getExample(input, target, weight)
00508         // and train_stats->update(train_costs)
00509 
00510         ++stage;
00511         train_stats->finalize(); // finalize statistics for this epoch
00512     }
00513     */
00514 }
00515 
00516 void DeepReconstructorNet::buildHiddenLayerOutputs(int which_input_layer, VMat inputs, VMat outputs)
00517 {
00518     int l = inputs.length();
00519     Vec in;
00520     Vec target;
00521     real weight;
00522     Func f = compute_layer[which_input_layer];
00523     Vec out(f->outputsize);
00524     for(int i=0; i<l; i++)
00525     {
00526         inputs->getExample(i,in,target,weight);
00527         f->fprop(in, out);
00528         /*
00529         if(i==0)
00530         {
00531             perr << "Function used for building hidden layer " << which_input_layer << endl;
00532             displayFunction(f, true);
00533         }
00534         */
00535         outputs->putOrAppendRow(i,out);
00536     }
00537     outputs->flush();
00538 }
00539 
00540 void DeepReconstructorNet::prepareForFineTuning()
00541 {
00542     Func f(layers[0]&target, supervised_costvec);
00543     Var totalcost = sumOf(train_set, f, minibatch_size);
00544     perr << "Function used for fine tuning" << endl;
00545     // displayFunction(f, true);
00546     // displayVarGraph(supervised_costvec);
00547     // displayVarGraph(totalcost);
00548 
00549     VarArray params = totalcost->parents();
00550     fine_tuning_optimizer->setToOptimize(params, totalcost);
00551 }
00552 
00553 
00554 TVec<Mat> DeepReconstructorNet::computeRepresentations(Mat input)
00555 {
00556     int nlayers = layers.length();
00557     TVec<Mat> representations(nlayers);
00558     VarArray proppath = propagationPath(layers[0],layers[nlayers-1]);
00559     layers[0]->matValue << input;
00560     proppath.fprop();
00561     // perr << "Graph for computing representations" << endl;
00562     // displayVarGraph(proppath,true, 333, "repr");
00563     for(int k=0; k<nlayers; k++)
00564         representations[k] = layers[k]->matValue.copy();
00565     return representations;
00566 }
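// Usage sketch (hypothetical caller code):
//     TVec<Mat> reps = learner->computeRepresentations(inputs);
// reps[0] is the input matrix itself and reps[k] the activation matrix of layer k,
// up to reps[nlayers-1], the output layer.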
00567 
00568 
00569 void DeepReconstructorNet::reconstructInputFromLayer(int layer)
00570 {
00571     for(int k=layer; k>0; k--)
00572         layers[k-1]->matValue << reconstructOneLayer(k);
00573     /*
00574     for(int k=layer; k>0; k--)
00575     {
00576         VarArray proppath = propagationPath(hidden_for_reconstruction[k-1],reconstructed_layers[k-1]);
00577 
00578         perr << "RECONSTRUCTING reconstructed_layers["<<k-1
00579              << "] from layers["<< k
00580              << "] " << endl;
00581         perr << "proppath:" << endl;
00582         perr << proppath << endl;
00583         perr << "proppath length: " << proppath.length() << endl;
00584 
00585         //perr << ">>>> reconstructed layers before fprop:" << endl;
00586         //perr << reconstructed_layers[k-1]->matValue << endl;
00587 
00588         proppath.fprop();
00589 
00590         //perr << ">>>> reconstructed layers after fprop:" << endl;
00591         //perr << reconstructed_layers[k-1]->matValue << endl;
00592 
00593         perr << "Graph for reconstructing layer " << k-1 << " from layer " << k << endl;
00594         displayVarGraph(proppath,true, 333, "reconstr");        
00595 
00596         //WARNING MEGA-HACK
00597         if (reconstructed_layers[k-1].width() == 2*layers[k-1].width())
00598         {
00599             Mat temp(layers[k-1].length(), layers[k-1].width());
00600             for (int n=0; n < layers[k-1].length(); n++)
00601                 for (int i=0; i < layers[k-1].width(); i++)
00602                     temp(n,i) = reconstructed_layers[k-1]->matValue(n,i*2);
00603             temp >> layers[k-1]->matValue;
00604         }        
00605         //END OF MEGA-HACK
00606         else
00607             reconstructed_layers[k-1]->matValue >> layers[k-1]->matValue;
00608     }
00609     */
00610 }
00611 
00612 TVec<Mat> DeepReconstructorNet::computeReconstructions(Mat input)
00613 {
00614     int nlayers = layers.length();
00615     VarArray proppath = propagationPath(layers[0],layers[nlayers-1]);
00616     layers[0]->matValue << input;
00617     proppath.fprop();
00618 
00619     TVec<Mat> reconstructions(nlayers-2);
00620     for(int k=1; k<nlayers-1; k++)
00621     {
00622         reconstructInputFromLayer(k);
00623         reconstructions[k-1] = layers[0]->matValue.copy();
00624     }
00625     return reconstructions;
00626 }
00627 
00628 
00629 void DeepReconstructorNet::fineTuningFor1Epoch()
00630 {
00631     if(train_stats.isNull())
00632         train_stats = new VecStatsCollector();
00633 
00634     int l = train_set->length();
00635     fine_tuning_optimizer->reset();
00636     fine_tuning_optimizer->nstages = l/minibatch_size;
00637     fine_tuning_optimizer->optimizeN(*train_stats);
00638 }
00639 
00640 /*
00641 void DeepReconstructorNet::fineTuningFullOld()
00642 {
00643     prepareForFineTuning();
00644 
00645     int l = train_set->length();
00646     int nepochs = nstages;
00647     perr << "\n\n*********************************************" << endl;
00648     perr << "*** Performing fine tuning for max. " << nepochs << " epochs " << endl;
00649     perr << "*** each epoch has " << l << " examples and " << l/minibatch_size << " optimizer stages (updates)" << endl;
00650 
00651     VecStatsCollector st;
00652     real prev_mean = -1;
00653     real relative_improvement = fine_tuning_improvement_rate;
00654     for(int n=0; n<nepochs && relative_improvement >= fine_tuning_improvement_rate; n++)
00655     {
00656         st.forget();
00657         fine_tuning_optimizer->nstages = l/minibatch_size;
00658         fine_tuning_optimizer->optimizeN(st);
00659         const StatsCollector& s = st.getStats(0);
00660         real m = s.mean();
00661         perr << "Epoch " << n+1 << " mean error: " << m << " +- " << s.stderror() << endl;
00662         if(prev_mean>0)
00663         {
00664             relative_improvement = ((prev_mean-m)/prev_mean)*100;
00665             perr << "Relative improvement: " << relative_improvement << " %"<< endl;
00666         }
00667         prev_mean = m;
00668     }
00669 }
00670 */
00671 
00672 void DeepReconstructorNet::trainSupervisedLayer(VMat inputs, VMat targets)
00673 {
00674     int l = inputs->length();
00675     pair<int,int> nepochs = supervised_nepochs;
00676     real min_improvement = supervised_min_improvement_rate;
00677 
00678     int last_hidden_layer = layers.length()-2;
00679     perr << "\n\n*********************************************" << endl;
00680     perr << "*** Training only supervised layer for max. " << nepochs.second << " epochs " << endl;
00681     perr << "*** each epoch has " << l << " examples and " << l/minibatch_size << " optimizer stages (updates)" << endl;
00682 
00683     Func f(layers[last_hidden_layer]&target, supervised_costvec);
00684     // displayVarGraph(supervised_costvec);
00685     VMat inputs_targets = hconcat(inputs, targets);
00686     inputs_targets->defineSizes(inputs.width(),targets.width());
00687 
00688     Var totalcost = sumOf(inputs_targets, f, minibatch_size);
00689     // displayVarGraph(totalcost);
00690 
00691     VarArray params = totalcost->parents();
00692     supervised_optimizer->setToOptimize(params, totalcost);
00693     supervised_optimizer->reset();
00694 
00695     TVec<string> colnames;
00696     VMat training_curve;
00697     Vec costrow;
00698 
00699     colnames.append("nepochs");
00700     colnames.append("relative_improvement");
00701     int ncosts=supervised_costs_names.length();
00702     for(int k=0; k<ncosts; k++)
00703     {
00704         colnames.append(supervised_costs_names[k]+"_mean");
00705         colnames.append(supervised_costs_names[k]+"_stderr");
00706     }
00707     training_curve = new FileVMatrix(expdir/"training_costs_output.pmat",0,colnames);
00708     costrow.resize(colnames.length());
00709 
00710     VecStatsCollector st;
00711     real prev_mean = -1;
00712     real relative_improvement = 1000;
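    // Stopping rule: always train for at least nepochs.first epochs, then continue
    // (up to nepochs.second) as long as the relative improvement of the first cost's
    // mean stays above min_improvement.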
00713     for(int n=0; n<nepochs.first || (n<nepochs.second && relative_improvement >= min_improvement); n++)
00714     {
00715         st.forget();
00716         supervised_optimizer->nstages = l/minibatch_size;
00717         supervised_optimizer->optimizeN(st);
00718         const StatsCollector& s = st.getStats(0);
00719         real m = s.mean();
00720         Vec means = st.getMean();
00721         Vec stderrs = st.getStdError();
00722         perr << "Epoch " << n+1 << " Mean costs: " << means << " stderr " << stderrs << endl;
00723         perr << "mean error: " << m << " +- " << s.stderror() << endl;
00724         if(prev_mean>0)
00725         {
00726             relative_improvement = (prev_mean-m)/prev_mean;
00727             perr << "Relative improvement: " << relative_improvement*100 << " %"<< endl;
00728         }
00729         prev_mean = m;
00730         //displayVarGraph(supervised_costvec, true);
00731 
00732         // save to a file
00733         costrow[0] = (real)n+1;
00734         costrow[1] = relative_improvement*100;
00735         for(int k=0; k<ncosts; k++) {
00736             costrow[2+k*2] = means[k];
00737             costrow[2+k*2+1] = stderrs[k];
00738         }
00739         training_curve->appendRow(costrow);
00740         training_curve->flush();
00741 
00742     }
00743     
00744 }
00745 
00746 void DeepReconstructorNet::trainHiddenLayer(int which_input_layer, VMat inputs)
00747 {
00748     int l = inputs->length();
00749     pair<int,int> nepochs = unsupervised_nepochs[which_input_layer];
00750     real min_improvement = -10000;
00751     if(unsupervised_min_improvement_rate.length()!=0)
00752         min_improvement = unsupervised_min_improvement_rate[which_input_layer];
00753     perr << "\n\n*********************************************" << endl;
00754     perr << "*** Training (unsupervised) layer " << which_input_layer+1 << " for max. " << nepochs.second << " epochs " << endl;
00755     perr << "*** each epoch has " << l << " examples and " << l/minibatch_size << " optimizer stages (updates)" << endl;
00756     Func f(layers[which_input_layer], reconstruction_costs[which_input_layer]);
00757     Var totalcost = sumOf(inputs, f, minibatch_size);
00758     VarArray params = totalcost->parents();
00759     //displayVarGraph(reconstruction_costs[which_input_layer]);
00760     //displayFunction(f,false,false, 333, "train_func");
00761     //displayVarGraph(totalcost,true);
00762     
00763     if ( reconstruction_optimizers.size() !=0 )
00764     {
00765         reconstruction_optimizers[which_input_layer]->setToOptimize(params, totalcost);
00766         reconstruction_optimizers[which_input_layer]->reset();    
00767     }
00768     else 
00769     {
00770         reconstruction_optimizer->setToOptimize(params, totalcost);
00771         reconstruction_optimizer->reset();    
00772     }
00773 
00774     Vec costrow;
00775     TVec<string> colnames;
00776     VMat training_curve;
00777 
00778     VecStatsCollector st;
00779     real prev_mean = -1;
00780     real relative_improvement = 1000;
00781     for(int n=0; n<nepochs.first || (n<nepochs.second && relative_improvement >= min_improvement); n++)
00782     {
00783         st.forget();
00784         if ( reconstruction_optimizers.size() !=0 )
00785         {
00786             reconstruction_optimizers[which_input_layer]->nstages = l/minibatch_size;
00787             reconstruction_optimizers[which_input_layer]->optimizeN(st);
00788         }
00789         else 
00790         {
00791             reconstruction_optimizer->nstages = l/minibatch_size;
00792             reconstruction_optimizer->optimizeN(st);
00793         }        
00794         int reconstr_cost_pos = 0;
00795 
00796         Vec means = st.getMean();
00797         Vec stderrs = st.getStdError();
00798         perr << "Epoch " << n+1 << ": " << means << " +- " << stderrs;
00799         real m = means[reconstr_cost_pos];
00800         // real er = stderrs[reconstr_cost_pos];
00801         if(n>0)
00802         {
00803             relative_improvement = (prev_mean-m)/fabs(prev_mean);
00804             perr << "  improvement: " << relative_improvement*100 << " %";
00805         }
00806         perr << endl;
00807 
00808         int ncosts = means.length();
00809         if(reconstruction_costs_names.length()!=ncosts)
00810         {
00811             reconstruction_costs_names.resize(ncosts);
00812             for(int k=0; k<ncosts; k++)
00813                 reconstruction_costs_names[k] = "cost"+tostring(k);
00814         }
00815 
00816         if(colnames.length()==0)
00817         {
00818             colnames.append("nepochs");
00819             colnames.append("relative_improvement");
00820             for(int k=0; k<ncosts; k++)
00821             {
00822                 colnames.append(reconstruction_costs_names[k]+"_mean");
00823                 colnames.append(reconstruction_costs_names[k]+"_stderr");
00824             }
00825             training_curve = new FileVMatrix(expdir/"training_costs_layer_"+tostring(which_input_layer+1)+".pmat",0,colnames);
00826         }
00827 
00828         costrow.resize(colnames.length());
00829 //        int k=0;
00830         costrow[0] = (real)n+1;
00831         costrow[1] = relative_improvement*100;
00832         for(int k=0; k<ncosts; k++)
00833         {
00834             costrow[2+k*2] = means[k];
00835             costrow[2+k*2+1] = stderrs[k];
00836         }
00837         training_curve->appendRow(costrow);
00838         training_curve->flush();
00839 
00840         prev_mean = m;
00841 
00842         // save_learner_after_each_pretraining_epoch
00843         PLearn::save(expdir/"learner.psave", *this, PStream::plearn_binary, false);
00844 
00845         /*
00846         if(n==0)
00847         {
00848             perr << "Displaying reconstruction_cost" << endl;
00849             displayVarGraph(reconstruction_costs[which_input_layer],true);
00850             perr << "Displaying optimized function f" << endl;
00851             displayFunction(f,true,false, 333, "train_func");
00852         }
00853         */
00854     }
00855 }
00856 
00876 void DeepReconstructorNet::computeAndSaveLayerActivationStats(VMat dataset, int which_layer, const string& filebasename, int nfirstunits, int notherunits)
00877 {
00878     int len = dataset.length();
00879     Var layer = layers[which_layer];
00880     int layersize = layer->size();
00881     Mat actstats(1+layersize,6);
00882     actstats.fill(0.);
00883     TVec<string> actstatsfields(6);
00884     actstatsfields[0] = "E[act]";
00885     actstatsfields[1] = "E[act^2]";
00886     actstatsfields[2] = "[0,.25)";
00887     actstatsfields[3] = "[.25,.50)";
00888     actstatsfields[4] = "[.50,.75)";
00889     actstatsfields[5] = "[.75,1.00]";
00890 
00891     // build the list of indexes of the units for which we want to keep bivariate statistics
00892     // we will take the nfirstunits first units, and notherunits at random from the rest.
00893     // resulting list of indices will be put in unitindexes.
00894     TVec<int> unitindexes(0,nfirstunits-1,1);
00895     if(notherunits>0)
00896     {
00897         TVec<int> randomindexes(notherunits, layersize, 1);
00898         PRandom rnd;
00899         rnd.shuffleElements(randomindexes);
00900         randomindexes = randomindexes.subVec(0,notherunits);
00901         unitindexes = concat(unitindexes, randomindexes);
00902     }
00903     int nselectunits = unitindexes.length();
00904     Vec selectedactivations(nselectunits); // will hold the activations of the selected units
00905 
00906     TVec<string> fieldnames(nselectunits);
00907     for(int k=0; k<nselectunits; k++)
00908         fieldnames[k] = tostring(unitindexes[k]);
00909     VecStatsCollector stcol;
00910     stcol.maxnvalues = 20;
00911     stcol.compute_covariance = true;
00912     stcol.setFieldNames(fieldnames);
00913     stcol.build();
00914 
00915     const int nbins = 5;
00916     // bivariate nbins*nbins histograms will be computed for each of the nselectunits*nselectunits pairs of units
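    // (row i*nselectunits+j of bihist holds the flattened nbins x nbins joint histogram of
    //  selected_unit_i vs selected_unit_j; e.g. with 25 selected units, the pair (3,7) goes to row 3*25+7 = 82)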
00917     Mat bihist(nselectunits*nselectunits, nbins*nbins);
00918     bihist.fill(0.);
00919     TVec<string> bihistfields(nbins*nbins);
00920     for(int k=0; k<nbins*nbins; k++)
00921         bihistfields[k] = tostring(1+k/nbins)+","+tostring(1+k%nbins);
00922         
00923     Vec input;
00924     Vec target;
00925     real weight;
00926     Vec output;
00927 
00928     for(int t=0; t<len; t++)
00929     {
00930         dataset.getExample(t, input, target, weight);
00931         computeOutput(input, output);
00932         Vec activations = layer->value;
00933         
00934         // collect simple univariate stats for all units
00935         for(int k=0; k<layersize; k++)
00936         {
00937             real act = activations[k];
00938             actstats(k+1,0) += act;
00939             actstats(k+1,1) += act*act;
00940             if(act<0.25)
00941                 actstats(k+1,2)++;
00942             else if(act<0.50)
00943                 actstats(k+1,3)++;
00944             else if(act<0.75)
00945                 actstats(k+1,4)++;   
00946             else
00947                 actstats(k+1,5)++;
00948         }
00949 
00950         // collect more extensive stats for selected units
00951         for(int k=0; k<nselectunits; k++)
00952             selectedactivations[k] = activations[unitindexes[k]];
00953 
00954         stcol.update(selectedactivations);
00955 
00956         // collect bivariate histograms for selected units
00957         for(int i=0; i<nselectunits; i++)
00958         {
00959             real act_i = selectedactivations[i];
00960             
00961             int binpos_i = int(act_i*nbins);
00962             if(binpos_i<0)
00963                 binpos_i = 0;
00964             else if(binpos_i>=nbins)
00965                 binpos_i = nbins-1;
00966 
00967             for(int j=0; j<nselectunits; j++)
00968             {
00969                 real act_j = selectedactivations[j];
00970                 int binpos_j = int(act_j*nbins);
00971                 if(binpos_j<0)
00972                     binpos_j = 0;
00973                 else if(binpos_j>=nbins)
00974                     binpos_j = nbins-1;
00975                 
00976                 bihist(i*nselectunits+j, nbins*binpos_i+binpos_j)++;
00977             }
00978         }
00979     }
00980 
00981     stcol.finalize();
00982     PLearn::save(filebasename+"_selected_statscol.psave", stcol);
00983 
00984     bihist *= 1./len;
00985     string pmatfilename = filebasename+"_selected_bihist.pmat";
00986     savePMat(pmatfilename, bihist);
00987     savePMatFieldnames(pmatfilename, bihistfields);
00988 
00989     actstats *= 1./len;
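    // row 0 of actstats receives, for each statistic (column), its average over all units of the layer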
00990     Vec meanvec = actstats(0);
00991     columnMean(actstats.subMat(1,0,layersize,6), meanvec);
00992     pmatfilename = filebasename+"_all_simplestats.pmat";
00993     savePMat(pmatfilename, actstats);
00994     savePMatFieldnames(pmatfilename, actstatsfields);
00995 }
00996 
00997 void DeepReconstructorNet::computeOutput(const Vec& input, Vec& output) const
00998 {
00999     output.resize(nout);
01000     compute_output->fprop(input, output);
01001 }
01002 
01003 void DeepReconstructorNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
01004                                            const Vec& target, Vec& costs) const
01005 {
01006     costs.resize(supervised_costs_names.length());
01007     output_and_target_to_cost->fprop(output&target, costs);
01008 }
01009 
01010 TVec<string> DeepReconstructorNet::getTestCostNames() const
01011 {
01012     return supervised_costs_names;
01013 }
01014 
01015 TVec<string> DeepReconstructorNet::getTrainCostNames() const
01016 { 
01017     return supervised_costs_names;
01018 }
01019 
01020 Mat DeepReconstructorNet::getParameterValue(const string& varname)
01021 {
01022     for(int i=0; i<parameters.length(); i++)
01023         if(parameters[i]->getName() == varname)
01024             return parameters[i]->matValue;
01025     PLERROR("There is no parameter named %s", varname.c_str());
01026     return Mat(0,0);
01027 }
01028 
01029 
01030 Vec DeepReconstructorNet::getParameterRow(const string& varname, int n)
01031 {
01032     for(int i=0; i<parameters.length(); i++)
01033         if(parameters[i]->getName() == varname)
01034             return parameters[i]->matValue(n);
01035     PLERROR("There is no parameter named %s", varname.c_str());
01036     return Vec(0);
01037 }
01038 
01039 TVec<string> DeepReconstructorNet::listParameterNames()
01040 {
01041     TVec<string> nameListe(0);
01042     for (int i=0; i<parameters.length(); i++)
01043         if (parameters[i]->getName() != "")
01044             nameListe.append(parameters[i]->getName());
01045     return nameListe;
01046 }
01047 
01048 TVec<Mat> DeepReconstructorNet::listParameter()
01049 {
01050     TVec<Mat> matList(0);
01051     for (int i=0; i<parameters.length(); i++)
01052         matList.append(parameters[i]->matValue);
01053     return matList;
01054 }
01055 
01056 
01057 Mat DeepReconstructorNet::getMatValue(int layer)
01058 {
01059     return layers[layer]->matValue;
01060 }
01061 
01062 void DeepReconstructorNet::setMatValue(int layer, Mat values)
01063 {
01064     layers[layer]->matValue << values;
01065 }
01066 
01067 Mat DeepReconstructorNet::fpropOneLayer(int layer)
01068 {
01069     VarArray proppath = propagationPath( layers[layer], layers[layer+1] );
01070     proppath.fprop();
01071     return getMatValue(layer+1);
01072 }
01073 
01074 Mat DeepReconstructorNet::reconstructOneLayer(int layer)
01075 {
01076     layers[layer]->matValue >> hidden_for_reconstruction[layer-1]->matValue;
01077     VarArray proppath = propagationPath(hidden_for_reconstruction[layer-1],reconstructed_layers[layer-1]);
01078     proppath.fprop();       
01079     return reconstructed_layers[layer-1]->matValue;
01080 }
01081 
01082 
01083 } // end of namespace PLearn
01084 
01085 
01086 /*
01087   Local Variables:
01088   mode:c++
01089   c-basic-offset:4
01090   c-file-style:"stroustrup"
01091   c-file-offsets:((innamespace . 0)(inline-open . 0))
01092   indent-tabs-mode:nil
01093   fill-column:79
01094   End:
01095 */
01096 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :