// -*- C++ -*-

// NeuralNet.cc
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: NeuralNet.cc 8321 2007-11-28 21:37:09Z nouiz $
 ******************************************************* */


#include <plearn/var/AffineTransformVariable.h>
#include <plearn/var/AffineTransformWeightPenalty.h>
#include <plearn/var/BinaryClassificationLossVariable.h>
#include <plearn/var/ClassificationLossVariable.h>
#include <plearn/var/ConcatColumnsVariable.h>
#include <plearn/var/CrossEntropyVariable.h>
#include <plearn/var/ExpVariable.h>
#include <plearn/var/IfThenElseVariable.h>
#include <plearn/var/LiftOutputVariable.h>
#include <plearn/var/LogSoftmaxVariable.h>
#include <plearn/var/MulticlassLossVariable.h>
#include <plearn/var/NegCrossEntropySigmoidVariable.h>
#include <plearn/var/OneHotSquaredLoss.h>
#include <plearn/var/SemiSupervisedProbClassCostVariable.h>
#include <plearn/var/SigmoidVariable.h>
#include <plearn/var/SoftmaxVariable.h>
#include <plearn/var/SoftplusVariable.h>
#include <plearn/var/SourceVariable.h>
#include <plearn/var/SubMatVariable.h>
#include <plearn/var/SumVariable.h>
#include <plearn/var/SumOfVariable.h>
#include <plearn/var/SumSquareVariable.h>
#include <plearn/var/TanhVariable.h>
#include <plearn/var/TransposeProductVariable.h>
#include <plearn/var/Var_operators.h>
#include <plearn/var/Var_utils.h>
#include <plearn/var/WeightedSumSquareVariable.h>

#include "NeuralNet.h"
//#include "DisplayUtils.h"
#include <plearn/math/random.h>
//#include "GradientOptimizer.h"
#include <plearn/var/IsMissingVariable.h>

namespace PLearn {
using namespace std;


PLEARN_IMPLEMENT_OBJECT(NeuralNet, "DEPRECATED: Use NNet instead", "NO HELP");

NeuralNet::NeuralNet()
    :nhidden(0),
     nhidden2(0),
     weight_decay(0),
     bias_decay(0),
     layer1_weight_decay(0),
     layer1_bias_decay(0),
     layer2_weight_decay(0),
     layer2_bias_decay(0),
     output_layer_weight_decay(0),
     output_layer_bias_decay(0),
     direct_in_to_out_weight_decay(0),
     direct_in_to_out(false),
     output_transfer_func(""),
     iseed(-1),
     semisupervised_flatten_factor(1),
     batch_size(1),
     nepochs(10000),
     saveparams("")
{}

NeuralNet::~NeuralNet()
{
}

void NeuralNet::declareOptions(OptionList& ol)
{
    declareOption(ol, "nhidden", &NeuralNet::nhidden, OptionBase::buildoption,
                  "    number of hidden units in first hidden layer (0 means no hidden layer)\n");

    declareOption(ol, "nhidden2", &NeuralNet::nhidden2, OptionBase::buildoption,
                  "    number of hidden units in second hidden layer (0 means no hidden layer)\n");

    declareOption(ol, "weight_decay", &NeuralNet::weight_decay, OptionBase::buildoption,
                  "    global weight decay for all layers\n");

    declareOption(ol, "bias_decay", &NeuralNet::bias_decay, OptionBase::buildoption,
                  "    global bias decay for all layers\n");

    declareOption(ol, "layer1_weight_decay", &NeuralNet::layer1_weight_decay, OptionBase::buildoption,
                  "    Additional weight decay for the first hidden layer.  Is added to weight_decay.\n");
    declareOption(ol, "layer1_bias_decay", &NeuralNet::layer1_bias_decay, OptionBase::buildoption,
                  "    Additional bias decay for the first hidden layer.  Is added to bias_decay.\n");

    declareOption(ol, "layer2_weight_decay", &NeuralNet::layer2_weight_decay, OptionBase::buildoption,
                  "    Additional weight decay for the second hidden layer.  Is added to weight_decay.\n");

    declareOption(ol, "layer2_bias_decay", &NeuralNet::layer2_bias_decay, OptionBase::buildoption,
                  "    Additional bias decay for the second hidden layer.  Is added to bias_decay.\n");

    declareOption(ol, "output_layer_weight_decay", &NeuralNet::output_layer_weight_decay, OptionBase::buildoption,
                  "    Additional weight decay for the output layer.  Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_bias_decay", &NeuralNet::output_layer_bias_decay, OptionBase::buildoption,
                  "    Additional bias decay for the output layer.  Is added to 'bias_decay'.\n");

    declareOption(ol, "direct_in_to_out_weight_decay", &NeuralNet::direct_in_to_out_weight_decay, OptionBase::buildoption,
                  "    Additional weight decay for the direct in-to-out layer.  Is added to 'weight_decay'.\n");

    declareOption(ol, "direct_in_to_out", &NeuralNet::direct_in_to_out, OptionBase::buildoption,
                  "    should we include direct input to output connections?\n");

    declareOption(ol, "output_transfer_func", &NeuralNet::output_transfer_func, OptionBase::buildoption,
                  "    what transfer function to use for the output layer? \n"
                  "    one of: tanh, sigmoid, softplus, exp, softmax, log_softmax \n"
                  "    an empty string means no output transfer function \n");

    declareOption(ol, "seed", &NeuralNet::iseed, OptionBase::buildoption,
                  "    Seed for the random number generator used to initialize parameters. If -1 then use time of day.\n");

    declareOption(ol, "cost_funcs", &NeuralNet::cost_funcs, OptionBase::buildoption,
                  "    a list of cost functions to use\n"
                  "    in the form \"[ cf1; cf2; cf3; ... ]\" where each function is one of: \n"
                  "      mse (for regression)\n"
                  "      mse_onehot (for classification)\n"
                  "      NLL (negative log likelihood -log(p[c]) for classification) \n"
                  "      class_error (classification error) \n"
                  "      multiclass_error\n"
                  "      cross_entropy\n"
                  "      semisupervised_prob_class\n"
                  "    The first function of the list will be used as \n"
                  "    the objective function to optimize \n"
                  "    (possibly with an added weight decay penalty) \n"
                  "    If semisupervised_prob_class is chosen, then the options\n"
                  "    semisupervised_{flatten_factor,prior} will be used. Note that\n"
                  "    output_transfer_func should be softmax in that case.\n"
        );
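
    // Hypothetical example: for a classifier one might set
    //   cost_funcs = [ NLL; class_error ]
    // so that the negative log-likelihood (plus any weight-decay penalty)
    // is optimized while the classification error is also reported.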

    declareOption(ol, "semisupervised_flatten_factor", &NeuralNet::semisupervised_flatten_factor, OptionBase::buildoption,
                  "    Hyper-parameter of the semi-supervised criterion for probabilistic classifiers\n");

    declareOption(ol, "semisupervised_prior", &NeuralNet::semisupervised_prior, OptionBase::buildoption,
                  "    Hyper-parameter of the semi-supervised criterion = prior class probabilities\n");

    declareOption(ol, "optimizer", &NeuralNet::optimizer, OptionBase::buildoption,
                  "    specify the optimizer to use\n");

    declareOption(ol, "batch_size", &NeuralNet::batch_size, OptionBase::buildoption,
                  "    how many samples to use to estimate the average gradient before updating the weights\n"
                  "    0 is equivalent to specifying training_set->length() \n"
                  "    NOTE: this overrides the optimizer's 'n_updates' and 'every_iterations'.\n");

    declareOption(ol, "nepochs", &NeuralNet::nepochs, OptionBase::buildoption,
                  "    how many times the optimizer gets to see the whole training set.\n");

    declareOption(ol, "paramsvalues", &NeuralNet::paramsvalues, OptionBase::learntoption,
                  "    The learned parameter vector (in which order?)\n");

    declareOption(ol, "saveparams", &NeuralNet::saveparams, OptionBase::learntoption,
                  "    This string, if not empty, indicates where in the expdir directory\n"
                  "    to save the final paramsvalues\n");

    declareOption(ol, "normalization", &NeuralNet::normalization, OptionBase::buildoption,
                  "    The normalization to be applied to the data\n");
    inherited::declareOptions(ol);

}

void NeuralNet::build()
{
    inherited::build();
    build_();
}

void NeuralNet::build_()
{
    /*
     * Create Topology Var Graph
     */

    // init. basic vars
    input = Var(inputsize(), "input");
    if (normalization.length()) {
        Var means(normalization[0]);
        Var stddevs(normalization[1]);
        output = (input - means) / stddevs;
    } else
        output = input;
    params.resize(0);

    // first hidden layer
    if(nhidden>0)
    {
        w1 = Var(1+inputsize(), nhidden, "w1");
        output = tanh(affine_transform(output,w1));
        params.append(w1);
    }

    // second hidden layer
    if(nhidden2>0)
    {
        w2 = Var(1+nhidden, nhidden2, "w2");
        output = tanh(affine_transform(output,w2));
        params.append(w2);
    }

    // output layer before transfer function
    wout = Var(1+output->size(), outputsize(), "wout");
    output = affine_transform(output,wout);
    params.append(wout);

    // direct in-to-out layer
    if(direct_in_to_out)
    {
        wdirect = Var(inputsize(), outputsize(), "wdirect");// Var(1+inputsize(), outputsize(), "wdirect");
        output += transposeProduct(wdirect, input);// affine_transform(input,wdirect);
        params.append(wdirect);
    }

    /*
     * output_transfer_func
     */
    if(output_transfer_func!="")
    {
        if(output_transfer_func=="tanh")
            output = tanh(output);
        else if(output_transfer_func=="sigmoid")
            output = sigmoid(output);
        else if(output_transfer_func=="softplus")
            output = softplus(output);
        else if(output_transfer_func=="exp")
            output = exp(output);
        else if(output_transfer_func=="softmax")
            output = softmax(output);
        else if (output_transfer_func == "log_softmax")
            output = log_softmax(output);
        else
            PLERROR("In NeuralNet::build_()  unknown output_transfer_func option: %s",output_transfer_func.c_str());
    }

    /*
     * target & weights
     */
    if(weightsize() != 0 && weightsize() != 1 && targetsize()/2 != weightsize())
        PLERROR("In NeuralNet::build_()  weightsize must be either:\n"
                "\t0: no weights on costs\n"
                "\t1: single weight applied on total cost\n"
                "\ttargetsize/2: vector of weights applied individually to each component of the cost\n"
                "weightsize= %d; targetsize= %d.", weightsize(), targetsize());


    target_and_weights= Var(targetsize(), "target_and_weights");
    target = new SubMatVariable(target_and_weights, 0, 0, targetsize()-weightsize(), 1);
    target->setName("target");
    if(0 < weightsize())
    {
        costweights = new SubMatVariable(target_and_weights, targetsize()-weightsize(), 0, weightsize(), 1);
        costweights->setName("costweights");
    }
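
    // With the check above satisfied, target_and_weights holds the target in
    // its first targetsize()-weightsize() entries, followed by the per-cost
    // weights (if any); e.g. (hypothetical sizes) with targetsize()==4 and
    // weightsize()==2, entries 0..1 are the targets and entries 2..3 the weights.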
    /*
     * costfuncs
     */
    int ncosts = cost_funcs.size();
    if(ncosts<=0)
        PLERROR("In NeuralNet::build_()  Empty cost_funcs : must at least specify the cost function to optimize!");
    costs.resize(ncosts);

    for(int k=0; k<ncosts; k++)
    {
        bool handles_missing_target=false;
        // create costfuncs and apply individual weights if weightsize() > 1
        if(cost_funcs[k]=="mse")
            if(weightsize() < 2)
                costs[k]= sumsquare(output-target);
            else
                costs[k]= weighted_sumsquare(output-target, costweights);
        else if(cost_funcs[k]=="mse_onehot")
            costs[k] = onehot_squared_loss(output, target);
        else if(cost_funcs[k]=="NLL") {
            if (output_transfer_func == "log_softmax")
                costs[k] = -output[target];
            else
                costs[k] = neg_log_pi(output, target);
        } else if(cost_funcs[k]=="class_error")
            costs[k] = classification_loss(output, target);
        else if(cost_funcs[k]=="multiclass_error")
            if(weightsize() < 2)
                costs[k] = multiclass_loss(output, target);
            else
                PLERROR("In NeuralNet::build_()  weighted multiclass error cost not implemented.");
        else if(cost_funcs[k]=="cross_entropy")
            if(weightsize() < 2)
                costs[k] = cross_entropy(output, target);
            else
                PLERROR("In NeuralNet::build_()  weighted cross entropy cost not implemented.");
        else if (cost_funcs[k]=="semisupervised_prob_class")
        {
            if (output_transfer_func!="softmax")
                PLWARNING("To properly use the semisupervised_prob_class criterion, the transfer function should probably be a softmax, to guarantee positive probabilities summing to 1");
            if (semisupervised_prior.length()==0) // default value is (1,1,1...)
            {
                semisupervised_prior.resize(outputsize());
                semisupervised_prior.fill(1.0);
            }
            costs[k] = new SemiSupervisedProbClassCostVariable(output,target,new SourceVariable(semisupervised_prior),
                                                               semisupervised_flatten_factor);
            handles_missing_target=true;
        }
        else
        {
            costs[k]= dynamic_cast<Variable*>(newObject(cost_funcs[k]));
            if(costs[k].isNull())
                PLERROR("In NeuralNet::build_()  unknown cost_func option: %s",cost_funcs[k].c_str());
            if(weightsize() < 2)
                costs[k]->setParents(output & target);
            else
                costs[k]->setParents(output & target & costweights);
            costs[k]->build();
        }

        // apply a single global weight if weightsize() == 1
        if(1 == weightsize())
            costs[k]= costs[k] * costweights;

        if (!handles_missing_target)
            costs[k] = ifThenElse(isMissing(target),var(MISSING_VALUE),costs[k]);
    }


    /*
     * weight and bias decay penalty
     */

    // create penalties
    VarArray penalties;
    if(w1 && ((layer1_weight_decay + weight_decay)!=0 || (layer1_bias_decay + bias_decay)!=0))
        penalties.append(affine_transform_weight_penalty(w1, (layer1_weight_decay + weight_decay), (layer1_bias_decay + bias_decay)));
    if(w2 && ((layer2_weight_decay + weight_decay)!=0 || (layer2_bias_decay + bias_decay)!=0))
        penalties.append(affine_transform_weight_penalty(w2, (layer2_weight_decay + weight_decay), (layer2_bias_decay + bias_decay)));
    if(wout && ((output_layer_weight_decay + weight_decay)!=0 || (output_layer_bias_decay + bias_decay)!=0))
        penalties.append(affine_transform_weight_penalty(wout, (output_layer_weight_decay + weight_decay), (output_layer_bias_decay + bias_decay)));
    if(wdirect && (direct_in_to_out_weight_decay + weight_decay) != 0)
        penalties.append(sumsquare(wdirect)*(direct_in_to_out_weight_decay + weight_decay));

    // apply penalty to cost
    if(penalties.size() != 0)
        cost = hconcat( sum(hconcat(costs[0] & penalties)) & costs );
    else
        cost = hconcat(costs[0] & costs);
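
    // Layout of the resulting cost Var: element 0 is cost_funcs[0] plus the
    // summed penalties (if any), followed by each individual cost in
    // cost_funcs, matching costNames() below.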


    cost->setName("cost");
    output->setName("output");

    // norman: ambiguous conversion (bool or char*?)
    //if(paramsvalues && (paramsvalues.size() == params.nelems()))
    if((bool)(paramsvalues) && (paramsvalues.size() == params.nelems()))
    {
        params << paramsvalues;
        initial_paramsvalues.resize(paramsvalues.length());
        initial_paramsvalues << paramsvalues;
    }
    else
    {
        paramsvalues.resize(params.nelems());
        initializeParams();
    }
    params.makeSharedValue(paramsvalues);

    // Funcs

    f = Func(input, output);
    costf = Func(input&target_and_weights, output&cost);
    costf->recomputeParents();
    output_and_target_to_cost = Func(output&target_and_weights, cost);
    output_and_target_to_cost->recomputeParents();
}

Array<string> NeuralNet::costNames() const
{
    return (cost_funcs[0]+"+penalty") & cost_funcs;
}

int NeuralNet::costsize() const
{ return cost->size(); }

void NeuralNet::train(VMat training_set)
{
    setTrainingSet(training_set);
    int l = training_set->length();
    int nsamples = batch_size>0 ? batch_size : l;
    Func paramf = Func(input&target_and_weights, cost); // parameterized function to optimize
    Var totalcost = meanOf(training_set,paramf, nsamples);
    optimizer->setToOptimize(params, totalcost);
    optimizer->nupdates = (nepochs*l)/nsamples;
    optimizer->every = l/nsamples;
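    // Illustrative numbers for the arithmetic above: with l = 1000 training
    // samples, batch_size = 10 and nepochs = 100, nsamples is 10, so the
    // optimizer performs (100*1000)/10 = 10000 updates, and 'every' is
    // 1000/10 = 100 minibatch updates, i.e. one pass over the training set.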
    optimizer->addMeasurer(*this);
    optimizer->build();
    optimizer->optimize();

    output_and_target_to_cost->recomputeParents();
    costf->recomputeParents();
    // cerr << "totalcost->value = " << totalcost->value << endl;
    setTrainCost(totalcost->value);
    if (saveparams!="")
        PLearn::save(expdir+saveparams,paramsvalues);
}


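// A note on the initialization below (as read from the code): each weight
// matrix is filled from a zero-mean normal whose spread scales roughly like
// 1/fan-in (the commented-out lines show earlier uniform and 1/sqrt(fan-in)
// variants), and row 0 of each layer's matrix (the biases) is then cleared.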
void NeuralNet::initializeParams()
{
    if (iseed<0)
        seed();
    else
        manual_seed(iseed);
    //real delta = 1./sqrt(inputsize());
    real delta = 1./inputsize();
    /*
      if(direct_in_to_out)
      {
      //fill_random_uniform(wdirect->value, -delta, +delta);
      fill_random_normal(wdirect->value, 0, delta);
      //wdirect->matValue(0).clear();
      }
    */
    if(nhidden>0)
    {
        //fill_random_uniform(w1->value, -delta, +delta);
        //delta = 1./sqrt(nhidden);
        fill_random_normal(w1->value, 0, delta);
        if(direct_in_to_out)
        {
            //fill_random_uniform(wdirect->value, -delta, +delta);
            fill_random_normal(wdirect->value, 0, delta);
            wdirect->matValue(0).clear();
        }
        delta = 1./nhidden;
        w1->matValue(0).clear();
    }
    if(nhidden2>0)
    {
        //fill_random_uniform(w2->value, -delta, +delta);
        //delta = 1./sqrt(nhidden2);
        fill_random_normal(w2->value, 0, delta);
        delta = 1./nhidden2;
        w2->matValue(0).clear();
    }
    //fill_random_uniform(wout->value, -delta, +delta);
    fill_random_normal(wout->value, 0, delta);
    wout->matValue(0).clear();
}

void NeuralNet::use(const Vec& in, Vec& prediction)
{
    f->fprop(in,prediction);
}

void NeuralNet::useAndCost(const Vec& inputvec, const Vec& targetvec, Vec outputvec, Vec costvec)
{
    costf->fprop(inputvec&targetvec, outputvec&costvec);
}

void NeuralNet::computeCost(const Vec& inputvec, const Vec& targetvec, const Vec& outputvec, const Vec& costvec)
{
    output_and_target_to_cost->fprop(outputvec&targetvec, costvec);
}

void NeuralNet::forget()
{
    if(initial_paramsvalues)
        params << initial_paramsvalues;
    else
        initializeParams();
    inherited::forget();
}

void NeuralNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(optimizer, copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :