NNet.cc
00001 // -*- C++ -*-
00002 
00003 // NNet.cc
00004 // Copyright (c) 1998-2002 Pascal Vincent
00005 // Copyright (C) 1999-2005 Yoshua Bengio and University of Montreal
00006 // Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
00007 //
00008 // Redistribution and use in source and binary forms, with or without
00009 // modification, are permitted provided that the following conditions are met:
00010 // 
00011 //  1. Redistributions of source code must retain the above copyright
00012 //     notice, this list of conditions and the following disclaimer.
00013 // 
00014 //  2. Redistributions in binary form must reproduce the above copyright
00015 //     notice, this list of conditions and the following disclaimer in the
00016 //     documentation and/or other materials provided with the distribution.
00017 // 
00018 //  3. The name of the authors may not be used to endorse or promote
00019 //     products derived from this software without specific prior written
00020 //     permission.
00021 // 
00022 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00023 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00024 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00025 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00026 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00027 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00028 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00029 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00030 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00031 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00032 // 
00033 // This file is part of the PLearn library. For more information on the PLearn
00034 // library, go to the PLearn Web site at www.plearn.org
00035 
00036 
00037 /* *******************************************************      
00038  * $Id: NNet.cc 10099 2009-04-08 14:40:42Z tihocan $
00039  ******************************************************* */
00040 
00041 
00042 #include <plearn/var/AffineTransformVariable.h>
00043 #include <plearn/var/AffineTransformWeightPenalty.h>
00044 #include <plearn/var/ArgmaxVariable.h>
00045 #include <plearn/var/BinaryClassificationLossVariable.h>
00046 #include <plearn/var/ClassificationLossVariable.h>
00047 #include <plearn/var/ConcatColumnsVariable.h>
00048 #include <plearn/var/CrossEntropyVariable.h>
00049 #include <plearn/var/DivVariable.h>
00050 #include <plearn/var/ExpVariable.h>
00051 #include <plearn/var/LiftOutputVariable.h>
00052 #include <plearn/var/LogSoftmaxVariable.h>
00053 #include <plearn/var/MarginPerceptronCostVariable.h>
00054 #include <plearn/var/ConfRatedAdaboostCostVariable.h>
00055 #include <plearn/var/GradientAdaboostCostVariable.h>
00056 #include <plearn/var/LogAddVariable.h>
00057 #include <plearn/var/MulticlassLossVariable.h>
00058 #include <plearn/var/NegCrossEntropySigmoidVariable.h>
00059 #include <plearn/var/NegLogPoissonVariable.h>
00060 #include <plearn/var/OneHotSquaredLoss.h>
00061 #include <plearn/var/PlusConstantVariable.h>
00062 #include <plearn/var/PlusVariable.h>
00063 #include <plearn/var/PlusManyVariable.h>
00064 #include <plearn/var/ProductVariable.h>
00065 #include <plearn/var/RowSumSquareVariable.h>
00066 #include <plearn/var/SigmoidVariable.h>
00067 #include <plearn/var/SoftmaxVariable.h>
00068 #include <plearn/var/SoftplusVariable.h>
00069 #include <plearn/var/SquareVariable.h>
00070 #include <plearn/var/SquareRootVariable.h>
00071 #include <plearn/var/SumVariable.h>
00072 #include <plearn/var/SumAbsVariable.h>
00073 #include <plearn/var/SumOfVariable.h>
00074 #include <plearn/var/SumOverBagsVariable.h>
00075 #include <plearn/var/SumSquareVariable.h>
00076 #include <plearn/var/TanhVariable.h>
00077 #include <plearn/var/TransposeVariable.h>
00078 #include <plearn/var/UnaryHardSlopeVariable.h>
00079 #include <plearn/var/UnfoldedFuncVariable.h>
00080 #include <plearn/var/Var_operators.h>
00081 #include <plearn/var/Var_utils.h>
00082 #include <plearn/var/FNetLayerVariable.h>
00083 
00084 #include <plearn/vmat/ConcatColumnsVMatrix.h>
00085 //#include <plearn/display/DisplayUtils.h>
00086 //#include "GradientOptimizer.h"
00087 #include "NNet.h"
00088 // #include <plearn/math/random.h>
00089 #include <plearn/vmat/SubVMatrix.h>
00090 #include <plearn/vmat/FileVMatrix.h>
00091 
00092 namespace PLearn {
00093 using namespace std;
00094 
00095 PLEARN_IMPLEMENT_OBJECT(NNet, "Ordinary Feedforward Neural Network with 1 or 2 hidden layers", 
00096                         "Neural network with many bells and whistles...");
00097 
00099 // NNet //
00101 NNet::NNet():
00102 n_training_bags(-1),
00103 nhidden(0),
00104 nhidden2(0),
00105 noutputs(0),
00106 operate_on_bags(false),
00107 max_bag_size(20),
00108 weight_decay(0),
00109 bias_decay(0),
00110 layer1_weight_decay(0),
00111 layer1_bias_decay(0),
00112 layer2_weight_decay(0),
00113 layer2_bias_decay(0),
00114 output_layer_weight_decay(0),
00115 output_layer_bias_decay(0),
00116 direct_in_to_out_weight_decay(0),
00117 classification_regularizer(0),
00118 margin(1),
00119 fixed_output_weights(0),
00120 rbf_layer_size(0),
00121 first_class_is_junk(1),
00122 penalty_type("L2_square"),
00123 L1_penalty(false),
00124 input_reconstruction_penalty(0),
00125 direct_in_to_out(false),
00126 output_transfer_func(""),
00127 hidden_transfer_func("tanh"),
00128 interval_minval(0), interval_maxval(1),
00129 do_not_change_params(false),
00130 first_hidden_layer_is_output(false),
00131 transpose_first_hidden_layer(false),
00132 n_non_params_in_first_hidden_layer(0),
00133 batch_size(1),
00134 initialization_method("uniform_linear"),
00135 ratio_rank(0)
00136 {
00137     // Use the generic PLearner random number generator.
00138     random_gen = new PRandom();
00139 }
00140 
00141 void NNet::declareOptions(OptionList& ol)
00142 {
00143     declareOption(
00144         ol, "nhidden", &NNet::nhidden, OptionBase::buildoption, 
00145         "Number of hidden units in first hidden layer (0 means no hidden layer)\n");
00146 
00147     declareOption(
00148         ol, "nhidden2", &NNet::nhidden2, OptionBase::buildoption, 
00149         "Number of hidden units in second hidden layer (0 means no hidden layer)\n");
00150 
00151     declareOption(
00152         ol, "noutputs", &NNet::noutputs, OptionBase::buildoption, 
00153         "Number of output units. This gives this learner its outputsize.  It is\n"
00154         "typically of the same dimensionality as the target for regression\n"
00155         "problems.  But for classification problems where target is just the\n"
00156         "class number, noutputs is usually of dimensionality number of classes\n"
00157         "(as we want to output a score or probability vector, one per class).\n"
00158         "\n"
00159         "The default value is 0, which is caught at build-time and gives an\n"
00160         "error.  If a value of -1 is put, noutputs is set from the targetsize of\n"
00161         "the trainingset the first time setTrainingSet() is called on the\n"
00162         "learner (appropriate for regression scenarios).  This allows using the\n"
00163         "learner as a 'template' without knowing in advance the number of\n"
00164         "outputs it should have to handle.  Future extensions will cover the\n"
00165         "case of automatically discovering the outputsize for classification.\n");
00166 
00167     declareOption(
00168         ol, "weight_decay", &NNet::weight_decay, OptionBase::buildoption, 
00169         "Global weight decay for all layers\n");
00170 
00171     declareOption(
00172         ol, "bias_decay", &NNet::bias_decay, OptionBase::buildoption, 
00173         "Global bias decay for all layers\n");
00174 
00175     declareOption(
00176         ol, "layer1_weight_decay", &NNet::layer1_weight_decay, OptionBase::buildoption, 
00177         "Additional weight decay for the first hidden layer.  Is added to weight_decay.\n");
00178 
00179     declareOption(
00180         ol, "layer1_bias_decay", &NNet::layer1_bias_decay, OptionBase::buildoption, 
00181         "Additional bias decay for the first hidden layer.  Is added to bias_decay.\n");
00182 
00183     declareOption(
00184         ol, "layer2_weight_decay", &NNet::layer2_weight_decay, OptionBase::buildoption, 
00185         "Additional weight decay for the second hidden layer.  Is added to weight_decay.\n");
00186 
00187     declareOption(
00188         ol, "layer2_bias_decay", &NNet::layer2_bias_decay, OptionBase::buildoption, 
00189         "Additional bias decay for the second hidden layer.  Is added to bias_decay.\n");
00190 
00191     declareOption(
00192         ol, "output_layer_weight_decay", &NNet::output_layer_weight_decay, OptionBase::buildoption, 
00193         "Additional weight decay for the output layer.  Is added to 'weight_decay'.\n");
00194 
00195     declareOption(
00196         ol, "output_layer_bias_decay", &NNet::output_layer_bias_decay, OptionBase::buildoption, 
00197         "Additional bias decay for the output layer.  Is added to 'bias_decay'.\n");
00198 
00199     declareOption(
00200         ol, "direct_in_to_out_weight_decay", &NNet::direct_in_to_out_weight_decay, OptionBase::buildoption, 
00201         "Additional weight decay for the direct in-to-out layer.  Is added to 'weight_decay'.\n");
00202 
00203     declareOption(
00204         ol, "penalty_type", &NNet::penalty_type,
00205         OptionBase::buildoption,
00206         "Penalty to use on the weights (for weight and bias decay).\n"
00207         "Can be any of:\n"
00208         "  - \"L1\": L1 norm,\n"
00209         "  - \"L1_square\": square of the L1 norm,\n"
00210         "  - \"L2_square\" (default): square of the L2 norm.\n");
00211 
00212     declareOption(
00213         ol, "L1_penalty", &NNet::L1_penalty, OptionBase::buildoption,
00214         "Deprecated - You should use \"penalty_type\" instead\n"
00215         "should we use L1 penalty instead of the default L2 penalty on the weights?\n");
00216 
00217     declareOption(
00218         ol, "fixed_output_weights", &NNet::fixed_output_weights, OptionBase::buildoption, 
00219         "If true then the output weights are not learned. They are initialized to +1 or -1 randomly.\n");
00220 
00221     declareOption(
00222         ol, "input_reconstruction_penalty", &NNet::input_reconstruction_penalty, OptionBase::buildoption,
00223         "If >0 then a set of weights will be added from a hidden layer to predict (reconstruct) the inputs\n"
00224         "and the total loss will include an extra term that is the squared input reconstruction error,\n"
00225         "multiplied by the input_reconstruction_penalty factor.\n");
00226 
00227     declareOption(
00228         ol, "direct_in_to_out", &NNet::direct_in_to_out, OptionBase::buildoption, 
00229         "should we include direct input to output connections?\n");
00230 
00231     declareOption(
00232         ol, "rbf_layer_size", &NNet::rbf_layer_size, OptionBase::buildoption,
00233         "If non-zero, add an extra layer which computes N(h(x);mu_i,sigma_i) (Gaussian density) for the\n"
00234         "i-th output unit with mu_i a free vector and sigma_i a free scalar, and h(x) the vector of\n"
00235         "activations of the 'representation' output, i.e. what would be the output layer otherwise. The\n"
00236         "given non-zero value is the number of these 'representation' outputs. Typically this\n"
00237         "makes sense for classification problems, with a softmax output_transfer_func. If the\n"
00238         "first_class_is_junk option is set then the first output (first class) does not get a\n"
00239         "Gaussian density but just a 'pseudo-uniform' density (the single free parameter is the\n"
00240         "value of that density) and in a softmax it makes sure that when h(x) is far from the\n"
00241         "centers mu_i for all the other classes then the last class gets the strongest posterior probability.\n");
00242 
00243     declareOption(
00244         ol, "first_class_is_junk", &NNet::first_class_is_junk, OptionBase::buildoption, 
00245         "This option is used only when rbf_layer_size>0. If true then the first class is\n"
00246         "treated differently and gets a pre-transfer-function value that is a learned constant, whereas\n"
00247         "the others get a normal centered at mu_i.\n");
00248 
00249     declareOption(
00250         ol, "output_transfer_func", &NNet::output_transfer_func, OptionBase::buildoption, 
00251         "what transfer function to use for ouput layer? One of: \n"
00252         "  - \"tanh\" \n"
00253         "  - \"sigmoid\" \n"
00254         "  - \"exp\" \n"
00255         "  - \"softplus\" \n"
00256         "  - \"softmax\" \n"
00257         "  - \"log_softmax\" \n"
00258         "  - \"interval(<minval>,<maxval>)\", which stands for\n"
00259         "          <minval>+(<maxval>-<minval>)*sigmoid(.).\n"
00260         "An empty string or \"none\" means no output transfer function \n");
00261 
00262     declareOption(
00263         ol, "hidden_transfer_func", &NNet::hidden_transfer_func, OptionBase::buildoption, 
00264         "What transfer function to use for hidden units? One of \n"
00265         "  - \"linear\" \n"
00266         "  - \"tanh\" \n"
00267         "  - \"sigmoid\" \n"
00268         "  - \"exp\" \n"
00269         "  - \"softplus\" \n"
00270         "  - \"softmax\" \n"
00271         "  - \"log_softmax\" \n"
00272         "  - \"hard_slope\" \n"
00273         "  - \"symm_hard_slope\" \n"
00274         "  - \"ratio\": e/(1+e) with e=sqrt(x'V'Vx + softplus(a)^2)\n"
00275         "               with a=b+W'x and V a matrix of rank 'ratio_rank'");
00276 
00277     declareOption(
00278         ol, "cost_funcs", &NNet::cost_funcs, OptionBase::buildoption, 
00279         "A list of cost functions to use\n"
00280         "in the form \"[ cf1; cf2; cf3; ... ]\" where each function is one of: \n"
00281         "  - \"mse\" (for regression)\n"
00282         "  - \"mse_onehot\" (for classification)\n"
00283         "  - \"NLL\" (negative log likelihood -log(p[c]) for classification) \n"
00284         "  - \"class_error\" (classification error) \n"
00285         "  - \"binary_class_error\" (classification error for a 0-1 binary classifier)\n"
00286         "  - \"multiclass_error\" \n"
00287         "  - \"cross_entropy\" (for binary classification)\n" 
00288         "  - \"stable_cross_entropy\" (more accurate backprop and possible regularization, for binary classification)\n"
00289         "  - \"margin_perceptron_cost\" (a hard version of the cross_entropy, uses the 'margin' option)\n"
00290         "  - \"lift_output\" (not a real cost function, just the output for lift computation)\n"
00291         "  - \"conf_rated_adaboost_cost\" (for Confidence-rated Adaboost)\n"
00292         "  - \"gradient_adaboost_cost\" (for MarginBoost, see \"Functional \n"
00293         "                                Gradient Techniques for Combining \n"
00294         "                                Hypotheses\" by Mason et al.)\n"
00295         "  - \"poisson_nll\"\n"
00296         "  - \"L1\"\n"
00297         "The FIRST function of the list will be used as \n"
00298         "the objective function to optimize \n"
00299         "(possibly with an added weight decay penalty) \n");
00300   
00301     declareOption(
00302         ol, "classification_regularizer", &NNet::classification_regularizer, OptionBase::buildoption, 
00303         "Used only in the stable_cross_entropy cost function, to fight overfitting (0<=r<1)\n");
00304 
00305     declareOption(
00306         ol, "first_hidden_layer", &NNet::first_hidden_layer, OptionBase::buildoption, 
00307         "A user-specified NAry Var that computes the output of the first hidden layer\n"
00308         "from the network input vector and a set of parameters. Its first argument should\n"
00309         "be the network input and the remaining arguments the tunable parameters.\n",
00310         OptionBase::advanced_level);
00311 
00312     declareOption(
00313         ol, "first_hidden_layer_is_output",
00314         &NNet::first_hidden_layer_is_output, OptionBase::buildoption,
00315         "If true and a 'first_hidden_layer' Var is provided, then this layer\n"
00316         "will be considered as the NNet output before transfer function.",
00317         OptionBase::advanced_level);
00318 
00319     declareOption(
00320         ol, "n_non_params_in_first_hidden_layer",
00321         &NNet::n_non_params_in_first_hidden_layer,
00322         OptionBase::buildoption, 
00323         "Number of elements in the 'varray' option of 'first_hidden_layer'\n"
00324         "that are not updated parameters (assumed to be the last elements in\n"
00325         "'varray').",
00326         OptionBase::advanced_level);
00327 
00328     declareOption(
00329         ol, "transpose_first_hidden_layer",
00330         &NNet::transpose_first_hidden_layer,
00331         OptionBase::buildoption, 
00332         "If true and the 'first_hidden_layer' option is set, this layer will\n"
00333         "be transposed, and the input variable given to this layer will also\n"
00334         "be transposed.", OptionBase::advanced_level);
00335 
00336     declareOption(
00337         ol, "margin", &NNet::margin, OptionBase::buildoption, 
00338         "Margin requirement, used only with the margin_perceptron_cost cost function.\n"
00339         "It should be positive, and larger values regularize more.\n");
00340 
00341     declareOption(
00342         ol, "do_not_change_params", &NNet::do_not_change_params, OptionBase::buildoption, 
00343         "If set to 1, the weights won't be loaded nor initialized at build time.");
00344 
00345     declareOption(
00346         ol, "optimizer", &NNet::optimizer, OptionBase::buildoption, 
00347         "Specify the optimizer to use\n");
00348 
00349     declareOption(
00350         ol, "batch_size", &NNet::batch_size, OptionBase::buildoption, 
00351         "How many samples to use to estimate the avergage gradient before updating the weights\n"
00352         "0 is equivalent to specifying training_set->length() \n");
00353 
00354     declareOption(
00355         ol, "initialization_method", &NNet::initialization_method, OptionBase::buildoption, 
00356         "The method used to initialize the weights:\n"
00357         " - \"normal_linear\"  = a normal law with variance 1/n_inputs\n"
00358         " - \"normal_sqrt\"    = a normal law with variance 1/sqrt(n_inputs)\n"
00359         " - \"uniform_linear\" = a uniform law in [-1/n_inputs, 1/n_inputs]\n"
00360         " - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(n_inputs), 1/sqrt(n_inputs)]\n"
00361         " - \"zero\"           = all weights are set to 0\n");
00362 
00363     declareOption(
00364         ol, "operate_on_bags", &NNet::operate_on_bags, OptionBase::buildoption,
00365         "If True, then samples are no longer considered as unique entities.\n"
00366         "Instead, each sample belongs to a so-called 'bag', that may contain\n"
00367         "1 or more samples. The last column of the target is assumed to\n"
00368         "provide information about bags (see help of SumOverBagsVariable for\n"
00369         "details on the coding of bags).\n"
00370         "When operating on bags, each bag is considered a training sample.\n"
00371         "The activations a_ci of output units c for each bag sample i are\n"
00372         "combined within each bag, yielding bag activation a_c given by:\n"
00373         "   a_c = logadd(a_c1, ..., acn)\n"
00374         "In particular, when using the 'softmax' output transfer function,\n"
00375         "this corresponds to computing:\n"
00376         "   P(class = c | x_1, ..., x_i, ..., x_n) =\n"
00377         "                          (\\sum_i exp(a_ci)) / \\sum_c,i exp(a_ci)\n"
00378         "where a_ci is the activation of output node c for the i-th sample\n"
00379         "x_i in the bag.",
00380         OptionBase::advanced_level);
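    // Illustrative note (not part of the original source): for a bag of two
    // samples x_1, x_2 with output activations a_c1, a_c2 for class c, the
    // bag activation is a_c = log(exp(a_c1) + exp(a_c2)); with a softmax
    // output this yields P(class = c | x_1, x_2) proportional to
    // exp(a_c1) + exp(a_c2), as described above.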
00381 
00382     declareOption(
00383         ol, "max_bag_size", &NNet::max_bag_size, OptionBase::buildoption,
00384         "Maximum number of samples in a bag (used with 'operate_on_bags').",
00385         OptionBase::advanced_level);
00386     
00387     declareOption(
00388         ol, "ratio_rank", &NNet::ratio_rank, OptionBase::buildoption,
00389         "Rank of matrix V when using the 'ratio' hidden transfer function.\n"
00390         "Use -1 for full rank, and 0 to have no quadratic term.",
00391         OptionBase::advanced_level);
00392 
00393 
00394     // Learnt options.
00395     
00396     declareOption(
00397         ol, "paramsvalues", &NNet::paramsvalues, OptionBase::learntoption, 
00398         "The learned parameter vector\n");
00399 
00400     // Introspective options.  The following are direct views on the individual
00401     // parameters of the NNet.  They are marked 'nosave' since they overlap
00402     // with paramsvalues, but are useful for inspecting the NNet structure from
00403     // a Python program.
00404     declareOption(
00405         ol, "w1", &NNet::w1,
00406         OptionBase::learntoption | OptionBase::nosave,
00407         "(Introspection option)  bias and weights of first hidden layer");
00408     
00409     declareOption(
00410         ol, "w2", &NNet::w2,
00411         OptionBase::learntoption | OptionBase::nosave,
00412         "(Introspection option)  bias and weights of second hidden layer");
00413     
00414     declareOption(
00415         ol, "wout", &NNet::wout,
00416         OptionBase::learntoption | OptionBase::nosave,
00417         "(Introspection option)  bias and weights of output layer");
00418 
00419     declareOption(
00420         ol, "outbias", &NNet::outbias,
00421         OptionBase::learntoption | OptionBase::nosave,
00422         "(Introspection option)  bias used only if fixed_output_weights");
00423     
00424     declareOption(
00425         ol, "wdirect", &NNet::wdirect,
00426         OptionBase::learntoption | OptionBase::nosave,
00427         "(Introspection option)  bias and weights for direct in-to-out connection");
00428 
00429     declareOption(
00430         ol, "wrec", &NNet::wrec,
00431         OptionBase::learntoption | OptionBase::nosave,
00432         "(Introspection option)  input reconstruction weights (optional), from hidden layer to predicted input");
00433     
00434     inherited::declareOptions(ol);
00435 }
00436 
00438 // build //
00440 void NNet::build()
00441 {
00442     inherited::build();
00443     build_();
00444 }
00445 
00447 // buildBagOutputFromBagInputs //
00449 void NNet::buildBagOutputFromBagInputs(
00450         const Var& input, Var& before_transfer_func,
00451         const Var& bag_inputs, const Var& bag_size, Var& bag_output)
00452 {
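    // Added comment: the per-sample network (input -> pre-transfer-function
    // output) is unfolded over the rows of 'bag_inputs', and the resulting
    // per-sample activations are combined with a column-wise logadd, which
    // implements the bag activation a_c = logadd(a_c1, ..., a_cn) described
    // in the 'operate_on_bags' option help.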
00453     Func in_to_out = Func(input, before_transfer_func);
00454     Var tmp_out = new UnfoldedFuncVariable(bag_inputs, in_to_out, false,
00455                                            bag_size);
00456     before_transfer_func = new LogAddVariable(tmp_out, bag_size, "per_column");
00457     applyTransferFunc(before_transfer_func, bag_output);
00458 }
00459 
00461 // build_ //
00463 void NNet::build_()
00464 {
00465     /*
00466      * Create Topology Var Graph
00467      */
00468 
00469     // Don't do anything if we don't have a train_set
00470     // It's the only one that knows the inputsize and targetsize anyway...
00471 
00472     if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
00473     {
00474         // Ensure we have some outputs.
00475         if (noutputs == 0)
00476             PLERROR("NNet: the option 'noutputs' must be specified");
00477 
00478         // Initialize input.
00479         input = Var(1, inputsize(), "input");
00480 
00481         // Initialize bag stuff.
00482         if (operate_on_bags) {
00483             bag_size = Var(1, 1, "bag_size");
00484             store_bag_size.resize(1);
00485             store_bag_inputs.resize(max_bag_size, inputsize());
00486         }
00487 
00488         params.resize(0);
00489         Var before_transfer_func;
00490 
00491         // Build main network graph.
00492         buildOutputFromInput(input, hidden_layer, before_transfer_func);
00493 
00494         // When operating on bags, use this network to compute the output on a
00495         // whole bag, which also becomes the output of the network.
00496         if (operate_on_bags) {
00497             bag_inputs = Var(max_bag_size, inputsize(), "bag_inputs");
00498             buildBagOutputFromBagInputs(input, before_transfer_func,
00499                                         bag_inputs, bag_size, output);
00500         }
00501 
00502         // Build target and weight variables.
00503         buildTargetAndWeight();
00504 
00505         // Build costs.
00506         if( L1_penalty )
00507         {
00508             PLDEPRECATED("Option \"L1_penalty\" deprecated. Please use \"penalty_type = L1\" instead.");
00509             L1_penalty = 0;
00510             penalty_type = "L1";
00511         }
00512 
00513         string pt = lowerstring( penalty_type );
00514         if( pt == "l1" )
00515             penalty_type = "L1";
00516         else if( pt == "l1_square" || pt == "l1 square" || pt == "l1square" )
00517             penalty_type = "L1_square";
00518         else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
00519             penalty_type = "L2_square";
00520         else if( pt == "l2" )
00521         {
00522             PLWARNING("L2 penalty not supported, assuming you want L2 square");
00523             penalty_type = "L2_square";
00524         }
00525         else
00526             PLERROR("penalty_type \"%s\" not supported", penalty_type.c_str());
00527 
00528         buildCosts(output, target, hidden_layer, before_transfer_func);
00529 
00530         // Shared values hack...
00531         if (!do_not_change_params) {
00532             if(paramsvalues.length() == params.nelems())
00533                 params << paramsvalues;
00534             else
00535             {
00536                 paramsvalues.resize(params.nelems());
00537                 initializeParams();
00538                 if(optimizer)
00539                     optimizer->reset();
00540             }
00541             params.makeSharedValue(paramsvalues);
00542         }
00543 
00544         // Build functions.
00545         buildFuncs(operate_on_bags ? bag_inputs : input,
00546                    output, target, sampleweight,
00547                    operate_on_bags ? bag_size : NULL);
00548 
00549     }
00550 }
00551 
00552 
00554 // setTrainingSet //
00556 void NNet::setTrainingSet(VMat training_set, bool call_forget)
00557 {
00558     PLASSERT( training_set );
00559     
00560     // Automatically set noutputs from targetsize if not already set
00561     if (noutputs < 0)
00562         noutputs = training_set->targetsize();
00563 
00564     inherited::setTrainingSet(training_set, call_forget);
00565     //cout << "name = " << name << endl << "targetsize = " << targetsize_ << endl << "weightsize = " << weightsize_ << endl;
00566 
00567     // Since the training set probably changed, it is safer to reset
00568     // 'n_training_bags', just in case.
00569     n_training_bags = -1;
00570     
00571 }
00572 
00574 // buildCosts //
00576 void NNet::buildCosts(const Var& the_output, const Var& the_target, const Var& hidden_layer, const Var& before_transfer_func) {
00577     int ncosts = cost_funcs.size();  
00578     if(ncosts<=0)
00579         PLERROR("In NNet::buildCosts - Empty cost_funcs : must at least specify the cost function to optimize!");
00580     costs.resize(ncosts);
00581 
00582     for (int k=0; k<ncosts; k++)
00583         costs[k] = getCost(cost_funcs[k], the_output, the_target, before_transfer_func);
00584 
00585     /*
00586      * weight and bias decay penalty
00587      */
00588 
00589     // create penalties
00590     buildPenalties(hidden_layer);
00591     test_costs = hconcat(costs);
00592 
00593     // Apply penalty to cost.
00594     // If there is no penalty, we still add costs[0] as the first cost, in
00595     // order to keep the same number of costs as if there was a penalty.
00596     if(penalties.size() != 0) {
00597         if (weightsize_>0)
00598             // only multiply by sampleweight if there are weights
00599             training_cost = hconcat(sampleweight*sum(hconcat(costs[0] & penalties))
00600                                     & (test_costs*sampleweight));
00601         else {
00602             training_cost = hconcat(sum(hconcat(costs[0] & penalties)) & test_costs);
00603         }
00604     } 
00605     else {
00606         if(weightsize_>0) {
00607             // only multiply by sampleweight if there are weights
00608             training_cost = hconcat(costs[0]*sampleweight & test_costs*sampleweight);
00609         } else {
00610             training_cost = hconcat(costs[0] & test_costs);
00611         }
00612     }
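    // Added comment: the resulting 'training_cost' row is laid out as
    // [ costs[0] + sum(penalties), cf1, cf2, ... ], which matches the names
    // returned by getTrainCostNames() ("<first cost>+penalty" followed by all
    // of cost_funcs), each term optionally multiplied by the sample weight.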
00613 
00614     training_cost->setName("training_cost");
00615     test_costs->setName("test_costs");
00616     the_output->setName("output");
00617 }
00618 
00620 // buildFuncs //
00622 void NNet::buildFuncs(const Var& the_input, const Var& the_output, const Var& the_target, const Var& the_sampleweight,
00623         const Var& the_bag_size) {
00624     invars.resize(0);
00625     VarArray outvars;
00626     VarArray testinvars;
00627     if (the_input)
00628     {
00629         invars.push_back(the_input);
00630         testinvars.push_back(the_input);
00631     }
00632     if (the_bag_size) {
00633         invars.append(the_bag_size);
00634         testinvars.append(the_bag_size);
00635     }
00636     if (the_output)
00637         outvars.push_back(the_output);
00638     if(the_target)
00639     {
00640         invars.push_back(the_target);
00641         testinvars.push_back(the_target);
00642         outvars.push_back(the_target);
00643     }
00644     if(the_sampleweight)
00645     {
00646         invars.push_back(the_sampleweight);
00647     }
00648     input_to_output = Func(the_input, the_output);
00649     test_costf = Func(testinvars, the_output&test_costs);
00650     test_costf->recomputeParents();
00651     output_and_target_to_cost = Func(outvars, test_costs); 
00652     // Since there will be a fprop() in the network, we need to make sure the
00653     // input is valid.
00654     if (train_set && train_set->length() >= the_input->length()) {
00655         Vec input, target;
00656         real weight;
00657         for (int i = 0; i < the_input->length(); i++) {
00658             train_set->getExample(i, input, target, weight);
00659             the_input->matValue(i) << input;
00660         }
00661     }
00662     output_and_target_to_cost->recomputeParents();
00663 }
00664 
00666 // buildOutputFromInput //
00668 void NNet::buildOutputFromInput(const Var& the_input, Var& hidden_layer, Var& before_transfer_func) {
00669     output = the_input;
00670 
00671     // First hidden layer.
00672 
00673     if (first_hidden_layer)
00674     {
00675         NaryVariable* layer_var = dynamic_cast<NaryVariable*>((Variable*)first_hidden_layer);
00676         if (!layer_var)
00677             PLERROR("In NNet::buildOutputFromInput - 'first_hidden_layer' should be "
00678                     "from a subclass of NaryVariable");
00679         if (layer_var->varray.size() < 1)
00680             layer_var->varray.resize(1);
00681         layer_var->varray[0] =
00682             transpose_first_hidden_layer ? transpose(output)
00683                                          : output; // Here output = NNet input.
00684         layer_var->build(); // make sure everything is consistent and finish the build
00685         if (layer_var->varray.size()<2)
00686             PLERROR("In NNet::buildOutputFromInput - 'first_hidden_layer' should have parameters");
00687         int index_max_param =
00688             layer_var->varray.length() - n_non_params_in_first_hidden_layer;
00689         for (int i = 1; i < index_max_param; i++)
00690             params.append(layer_var->varray[i]);
00691         hidden_layer = transpose_first_hidden_layer ? transpose(layer_var)
00692                                                     : layer_var;
00693         output = hidden_layer;
00694     }
00695     else if(nhidden>0)
00696     {
00697         w1 = Var(1 + the_input->width(), nhidden, "w1");      
00698         params.append(w1);
00699         if (hidden_transfer_func == "ratio") {
00700             v1.resize(ratio_rank > 0 ? ratio_rank
00701                                      : ratio_rank == -1 ? the_input->width()
00702                                                         : 0);
00703             for (int i = 0; i < v1.length(); i++) {
00704                 v1[i] = Var(the_input->width(), nhidden, "v1[" + tostring(i) + "]");
00705                 params.append(v1[i]);
00706             }
00707         }
00708         hidden_layer = hiddenLayer(output, w1, "default", &v1);
00709         output = hidden_layer;
00710         // TODO BEWARE! This is not the same 'hidden_layer' as before.
00711     }
00712 
00713     // second hidden layer
00714     if(nhidden2>0)
00715     {
00716         PLASSERT( !first_hidden_layer_is_output );
00717         w2 = Var(1 + output.width(), nhidden2, "w2");
00718         params.append(w2);
00719         if (hidden_transfer_func == "ratio") {
00720             v2.resize(ratio_rank > 0 ? ratio_rank
00721                                      : ratio_rank == -1 ? output->width()
00722                                                         : 0);
00723             for (int i = 0; i < v2.length(); i++) {
00724                 v2[i] = Var(output->width(), nhidden2, "v2[" + tostring(i) + "]");
00725                 params.append(v2[i]);
00726             }
00727         }
00728         output = hiddenLayer(output, w2, "default", &v2);
00729     }
00730 
00731     if (nhidden2>0 && nhidden==0 && !first_hidden_layer)
00732         PLERROR("NNet:: can't have nhidden2 (=%d) > 0 while nhidden=0",nhidden2);
00733 
00734     if (rbf_layer_size>0)
00735     {
00736         if (first_class_is_junk)
00737         {
00738             rbf_centers = Var(outputsize()-1, rbf_layer_size, "rbf_centers");
00739             rbf_sigmas = Var(outputsize()-1, "rbf_sigmas");
00740             PLERROR("In NNet.cc, the code needs to be completed, rbf_layer isn't declared and thus it doesn't compile with the line below");
00741             // TODO (Also put back the corresponding include).
00742             //          output = hconcat(rbf_layer(output,rbf_centers,rbf_sigmas)&junk_prob);
00743             params.append(junk_prob);
00744         }
00745         else
00746         {
00747             rbf_centers = Var(outputsize(), rbf_layer_size, "rbf_centers");
00748             rbf_sigmas = Var(outputsize(), "rbf_sigmas");
00749             PLERROR("In NNet.cc, the code needs to be completed, rbf_layer isn't declared and thus it doesn't compile with the line below");
00750             //          output = rbf_layer(output,rbf_centers,rbf_sigmas);
00751         }
00752         params.append(rbf_centers);
00753         params.append(rbf_sigmas);
00754     }
00755 
00756     // Output layer before transfer function.
00757     if (!first_hidden_layer_is_output) {
00758         wout = Var(1 + output->width(), outputsize(), "wout");
00759         output = affine_transform(output, wout, true);
00760         output->setName("output_activations");
00761         if (!fixed_output_weights)
00762             params.append(wout);
00763         else
00764         {
00765             outbias = Var(1, output->width(), "outbias");
00766             output = output + outbias;
00767             params.append(outbias);
00768         }
00769     } else {
00770         // Verify we have provided a 'first_hidden_layer' Variable: even though
00771         // one might want to use this option without such a Var, it would be
00772         // simpler in this case to just set 'nhidden' to 0.
00773         if (!first_hidden_layer)
00774             PLERROR("In NNet::buildOutputFromInput - The option "
00775                     "'first_hidden_layer_is_output' can only be used in "
00776                     "conjunction with a 'first_hidden_layer' Variable");
00777     }
00778 
00779     // Direct in-to-out layer.
00780     if(direct_in_to_out)
00781     {
00782         wdirect = Var(the_input->width(), outputsize(), "wdirect");
00783         output += product(the_input, wdirect);
00784         params.append(wdirect);
00785         if (nhidden <= 0)
00786             PLERROR("In NNet::buildOutputFromInput - It seems weird to use direct in-to-out connections if there is no hidden layer anyway");
00787     }
00788 
00789     before_transfer_func = output;
00790     applyTransferFunc(before_transfer_func, output);
00791 }
00792 
00794 // applyTransferFunc //
00796 void NNet::applyTransferFunc(const Var& before_transfer_func, Var& output)
00797 {
00798     size_t p=0;
00799     if(output_transfer_func!="" && output_transfer_func!="none")
00800     {
00801         if(output_transfer_func=="tanh")
00802             output = tanh(before_transfer_func);
00803         else if(output_transfer_func=="sigmoid")
00804             output = sigmoid(before_transfer_func);
00805         else if(output_transfer_func=="softplus")
00806             output = softplus(before_transfer_func);
00807         else if(output_transfer_func=="exp")
00808             output = exp(before_transfer_func);
00809         else if(output_transfer_func=="softmax")
00810             output = softmax(before_transfer_func);
00811         else if (output_transfer_func == "log_softmax")
00812             output = log_softmax(before_transfer_func);
00813         else if ((p=output_transfer_func.find("interval"))!=string::npos)
00814         {
00815             size_t q = output_transfer_func.find(",");
00816             interval_minval = atof(output_transfer_func.substr(p+1,q-(p+1)).c_str());
00817             size_t r = output_transfer_func.find(")");
00818             interval_maxval = atof(output_transfer_func.substr(q+1,r-(q+1)).c_str());
00819             output = interval_minval + (interval_maxval - interval_minval)*sigmoid(before_transfer_func);
00820         }
00821         else
00822             PLERROR("In NNet::applyTransferFunc() -Unknown value for the "
00823                    "'output_transfer_func' option: %s",
00824                    output_transfer_func.c_str());
00825     }
00826 }
00827 
00829 // buildPenalties //
00831 void NNet::buildPenalties(const Var& hidden_layer) {
00832     penalties.resize(0);  // prevents penalties from being added twice by consecutive builds
00833     if(w1 && (!fast_exact_is_equal(layer1_weight_decay + weight_decay, 0) ||
00834               !fast_exact_is_equal(layer1_bias_decay + bias_decay,     0)))
00835         penalties.append(affine_transform_weight_penalty(w1, (layer1_weight_decay + weight_decay), (layer1_bias_decay + bias_decay), penalty_type));
00836     if(w2 && (!fast_exact_is_equal(layer2_weight_decay + weight_decay, 0) ||
00837               !fast_exact_is_equal(layer2_bias_decay + bias_decay,     0)))
00838         penalties.append(affine_transform_weight_penalty(w2, (layer2_weight_decay + weight_decay), (layer2_bias_decay + bias_decay), penalty_type));
00839     if(wout && (!fast_exact_is_equal(output_layer_weight_decay + weight_decay, 0) ||
00840                 !fast_exact_is_equal(output_layer_bias_decay + bias_decay,     0)))
00841         penalties.append(affine_transform_weight_penalty(wout, (output_layer_weight_decay + weight_decay), 
00842                                                          (output_layer_bias_decay + bias_decay), penalty_type));
00843     if(wdirect &&
00844        !fast_exact_is_equal(direct_in_to_out_weight_decay + weight_decay, 0))
00845     {
00846         if (penalty_type == "L1_square")
00847             penalties.append(square(sumabs(wdirect))*(direct_in_to_out_weight_decay + weight_decay));
00848         else if (penalty_type == "L1")
00849             penalties.append(sumabs(wdirect)*(direct_in_to_out_weight_decay + weight_decay));
00850         else if (penalty_type == "L2_square")
00851             penalties.append(sumsquare(wdirect)*(direct_in_to_out_weight_decay + weight_decay));
00852     }
00853     if (input_reconstruction_penalty>0)
00854     {
00855         wrec = Var(1 + hidden_layer->width(),input->width(),"wrec");
00856         predicted_input = affine_transform(hidden_layer, wrec, true);
00857         params.append(wrec);
00858         penalties.append(input_reconstruction_penalty*sumsquare(predicted_input - input));
00859     }
00860 }
00861 
00863 // buildTargetAndWeight //
00865 void NNet::buildTargetAndWeight() {
00866     int ts = operate_on_bags ? targetsize() - 1 // Remove bag information.
00867                              : targetsize();
00868     target = Var(1, ts, "target");
00869     if(weightsize_>0)
00870     {
00871         if (weightsize_!=1)
00872             PLERROR("In NNet::buildTargetAndWeight - Expected weightsize to be 1 or 0 (or unspecified = -1, meaning 0), got %d",weightsize_);
00873         sampleweight = Var(1, "weight");
00874     }
00875 }
00876 
00878 // computeCostsFromOutputs //
00880 void NNet::computeCostsFromOutputs(const Vec& inputv, const Vec& outputv, 
00881                                    const Vec& targetv, Vec& costsv) const
00882 {
00883     PLASSERT_MSG( !operate_on_bags, "Not implemented" );
00884 #ifdef BOUNDCHECK
00885     // Stable cross entropy needs the value *before* the transfer function.
00886     if (cost_funcs.contains("stable_cross_entropy") or
00887        (cost_funcs.contains("NLL") and outputsize() == 1))
00888         PLERROR("In NNet::computeCostsFromOutputs - Cannot directly compute stable "
00889                 "cross entropy from output and target");
00890 #endif
00891     costsv.resize(nTestCosts());
00892     output_and_target_to_cost->fprop(outputv&targetv, costsv); 
00893 }
00894 
00896 // computeOutput //
00898 void NNet::computeOutput(const Vec& inputv, Vec& outputv) const
00899 {
00900     if (operate_on_bags)
00901         PLERROR("In NNet::computeOutput - Cannot compute output without bag "
00902                 "information");
00903     outputv.resize(outputsize());
00904     input_to_output->fprop(inputv,outputv);
00905 }
00906 
00908 // computeOutputAndCosts //
00910 void NNet::computeOutputAndCosts(const Vec& inputv, const Vec& targetv, 
00911                                  Vec& outputv, Vec& costsv) const
00912 {
00913     outputv.resize(outputsize());
00914     costsv.resize(nTestCosts());
00915     if (!operate_on_bags)
00916         test_costf->fprop(inputv&targetv, outputv&costsv);
00917     else {
00918         // We can only compute the output once the whole bag has been seen.
00919         int last_target_idx = targetv.length() - 1;
00920         int bag_info = int(round(targetv[last_target_idx]));
00921         if (bag_info & SumOverBagsVariable::TARGET_COLUMN_FIRST)
00922             store_bag_size[0] = 0;
00923         store_bag_inputs(int(round(store_bag_size[0]))) << inputv;
00924         store_bag_size[0]++;
00925         if (bag_info & SumOverBagsVariable::TARGET_COLUMN_LAST)
00926             test_costf->fprop(store_bag_inputs.toVec()
00927                                     & store_bag_size
00928                                     & targetv.subVec(0, last_target_idx),
00929                               outputv & costsv);
00930         else {
00931             outputv.fill(MISSING_VALUE);
00932             costsv.fill(MISSING_VALUE);
00933         }
00934     }
00935 }
00936 
00938 // fillWeights //
00940 void NNet::fillWeights(const Var& weights, bool clear_first_row)
00941 {
00942     if (!weights)
00943         return;
00944 
00945     if (initialization_method == "zero")
00946     {
00947         weights->value->clear();
00948         return;
00949     }
00950     real delta;
00951     int is = weights.length();
00952     if (clear_first_row)
00953         is--; // -1 to get the same result as before.
00954     if (initialization_method.find("linear") != string::npos)
00955         delta = 1.0 / real(is);
00956     else
00957         delta = 1.0 / sqrt(real(is));
00958     if (initialization_method.find("normal") != string::npos)
00959         random_gen->fill_random_normal(weights->value, 0, delta);
00960     else
00961         random_gen->fill_random_uniform(weights->value, -delta, delta);
00962     if (clear_first_row)
00963         weights->matValue(0).clear();
00964 }
00965 
00967 // forget //
00969 void NNet::forget()
00970 {
00971     inherited::forget();
00972     if (train_set) initializeParams();
00973     if(optimizer)
00974         optimizer->reset();
00975     stage = 0;
00976     n_training_bags = -1;
00977 }
00978 
00980 // getCost //
00982 Var NNet::getCost(const string& costname, const Var& the_output,
00983                   const Var& the_target, const Var& before_transfer_func)
00984 {
00985     // We don't need to take into account the sampleweight, because it is
00986     // taken care of in stats->update.
00987     if (costname=="mse") {
00988         // The following assert may be useful since 'operator-' on variables
00989         // can be used to do subtractions on Variables of different sizes,
00990         // which should not be the case in a NNet.
00991         PLASSERT( the_output->length() == the_target->length() &&
00992                   the_output->width() == the_target->width() );
00993         return sumsquare(the_output - the_target);
00994     } else if (costname=="mse_onehot")
00995         return onehot_squared_loss(the_output, the_target);
00996     else if (costname=="NLL") 
00997     {
00998         if (the_output->width() == 1) {
00999             // Assume sigmoid output here!
01000             return stable_cross_entropy(before_transfer_func, the_target);
01001         } else {
01002             if (output_transfer_func == "log_softmax")
01003                 return -the_output[the_target];
01004             else
01005                 return neg_log_pi(the_output, the_target);
01006         }
01007     } 
01008     else if (costname=="class_error")
01009     {
01010         if (the_output->width()==1)
01011             return binary_classification_loss(the_output, the_target);
01012         else {
01013             Var targ = the_target;
01014             if (targetsize() > 1)
01015                 // One-hot encoding of target: we need to convert it to an
01016                 // index in order to be able to use 'classification_loss'.
01017                 targ = argmax(the_target);
01018             return classification_loss(the_output, targ);
01019         }
01020     }
01021     else if (costname=="binary_class_error")
01022         return binary_classification_loss(the_output, the_target);
01023     else if (costname=="multiclass_error")
01024         return multiclass_loss(the_output, the_target);
01025     else if (costname=="cross_entropy")
01026         return cross_entropy(the_output, the_target);
01027     else if(costname=="conf_rated_adaboost_cost")
01028     {
01029         if(output_transfer_func != "sigmoid")
01030             PLWARNING("In NNet:buildCosts(): conf_rated_adaboost_cost expects an output in (0,1)");
01031         alpha_adaboost = Var(1,1); alpha_adaboost->value[0] = 1.0;
01032         params.append(alpha_adaboost);
01033         return conf_rated_adaboost_cost(the_output, the_target, alpha_adaboost);
01034     }
01035     else if (costname=="gradient_adaboost_cost")
01036     {
01037         if(output_transfer_func != "sigmoid")
01038             PLWARNING("In NNet:buildCosts(): gradient_adaboost_cost expects an output in (0,1)");
01039         return gradient_adaboost_cost(the_output, the_target);
01040     }
01041     else if (costname=="stable_cross_entropy") {
01042         Var c = stable_cross_entropy(before_transfer_func, the_target);
01043         PLASSERT( classification_regularizer >= 0 );
01044         if (classification_regularizer > 0) {
01045             // There is a regularizer to add to the cost function.
01046             dynamic_cast<NegCrossEntropySigmoidVariable*>((Variable*) c)->
01047                 setRegularizer(classification_regularizer);
01048         }
01049         return c;
01050     }
01051     else if (costname=="margin_perceptron_cost")
01052         return margin_perceptron_cost(the_output,the_target,margin);
01053     else if (costname=="lift_output")
01054         return lift_output(the_output, the_target);
01055     else if (costname=="poisson_nll") {
01056         VarArray the_varray(the_output, the_target);
01057         if (weightsize()>0) {
01058             PLERROR("In NNet::getCost - The weight is used, is this really "
01059                     "intended? (see comment in code at the top of this "
01060                     "method");
01061             the_varray.push_back(sampleweight);
01062         }
01063         return neglogpoissonvariable(the_varray);
01064     }
01065     else if (costname == "L1")
01066         return sumabs(the_output - the_target);
01067     else {
01068         // Assume we got a Variable name and its options                
01069         Var cost = dynamic_cast<Variable*>(newObject(costname));
01070         if(cost.isNull())
01071             PLERROR("In NNet::build_() - unknown cost name: %s",
01072                     costname.c_str());
01073         cost->setParents(the_output & the_target);
01074         cost->build();
01075         return cost;
01076     }
01077 }
01078 
01080 // getTrainCostNames //
01082 TVec<string> NNet::getTrainCostNames() const
01083 {
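    // Added example: with cost_funcs = [ "NLL", "class_error" ], this returns
    // [ "NLL+penalty", "NLL", "class_error" ], i.e. the first cost (with the
    // weight decay penalties added in) followed by every declared cost.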
01084     PLASSERT( !cost_funcs.isEmpty() );
01085     int n_costs = cost_funcs.length();
01086     TVec<string> train_costs(n_costs + 1);
01087     train_costs[0] = cost_funcs[0] + "+penalty";
01088     train_costs.subVec(1, n_costs) << cost_funcs;
01089     return train_costs;
01090 }
01091 
01093 // getTestCostNames //
01095 TVec<string> NNet::getTestCostNames() const
01096 { 
01097     return cost_funcs;
01098 }
01099 
01101 // hiddenLayer //
01103 Var NNet::hiddenLayer(const Var& input, const Var& weights, string transfer_func,
01104                       VarArray* ratio_quad_weights) {
01105     Var hidden = affine_transform(input, weights, true);
01106     hidden->setName("hidden_layer_activations");
01107     Var result;
01108     if (transfer_func == "default")
01109         transfer_func = hidden_transfer_func;
01110     if(transfer_func=="linear")
01111         result = hidden;
01112     else if(transfer_func=="tanh")
01113         result = tanh(hidden);
01114     else if(transfer_func=="sigmoid")
01115         result = sigmoid(hidden);
01116     else if(transfer_func=="softplus")
01117         result = softplus(hidden);
01118     else if(transfer_func=="exp")
01119         result = exp(hidden);
01120     else if(transfer_func=="softmax")
01121         result = softmax(hidden);
01122     else if (transfer_func == "log_softmax")
01123         result = log_softmax(hidden);
01124     else if(transfer_func=="hard_slope")
01125         result = unary_hard_slope(hidden,0,1);
01126     else if(transfer_func=="symm_hard_slope")
01127         result = unary_hard_slope(hidden,-1,1);
01128     else if (transfer_func == "ratio") {
01129         PLASSERT( ratio_quad_weights );
01130         Var softp = new SoftplusVariable(hidden);
01131         Var before_ratio = softp;
01132         if (ratio_rank != 0) {
01133             // Compute quadratic term.
01134             VarArray quad_terms(ratio_quad_weights->length());
01135             for (int i = 0; i < ratio_quad_weights->length(); i++) {
01136                 quad_terms[i] = new SquareVariable(
01137                         new ProductVariable(input, (*ratio_quad_weights)[i]));
01138             }
01139             Var sum_quad_terms = new PlusManyVariable(quad_terms);
01140             // Add the softplus term.
01141             Var softp_square = new SquareVariable(softp);
01142             Var total = new PlusVariable(sum_quad_terms, softp_square);
01143             // Take the square root.
01144             before_ratio = new SquareRootVariable(total);
01145         }
01146         // Perform ratio.
01147         result = new DivVariable(before_ratio,
01148                                  new PlusConstantVariable(before_ratio, 1.0));
01149     }
01150     else
01151         PLERROR("In NNet::hiddenLayer - Unknown value for transfer_func: %s",transfer_func.c_str());
01152     return result;
01153 }
01154 
01156 // initializeParams //
01158 void NNet::initializeParams(bool set_seed)
01159 {
01160     if (set_seed && seed_ != 0)
01161         random_gen->manual_seed(seed_);
01162 
01163     if (nhidden>0) {
01164         if (!first_hidden_layer) {
01165             fillWeights(w1, true);
01166             for (int i = 0; i < v1.length(); i++)
01167                 fillWeights(v1[i], true);
01168         }
01169         if (direct_in_to_out)
01170             fillWeights(wdirect, false);
01171     }
01172 
01173     if(nhidden2>0) {
01174         fillWeights(w2, true);
01175         for (int i = 0; i < v2.length(); i++)
01176             fillWeights(v2[i], true);
01177     }
01178 
01179     if (fixed_output_weights) {
01180         static Vec values;
01181         if (values.size()==0)
01182         {
01183             values.resize(2);
01184             values[0]=-1;
01185             values[1]=1;
01186         }
01187         random_gen->fill_random_discrete(wout->value, values);
01188         wout->matValue(0).clear();
01189     }
01190     else {
01191         fillWeights(wout, true);
01192     }
01193 }
01194 
01196 #ifdef __INTEL_COMPILER
01197 #pragma warning(disable:1419)  // Get rid of compiler warning.
01198 #endif
01199 extern void varDeepCopyField(Var& field, CopiesMap& copies);
01200 #ifdef __INTEL_COMPILER
01201 #pragma warning(default:1419)
01202 #endif
01203 
01204 
01206 // makeDeepCopyFromShallowCopy //
01208 void NNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
01209 {
01210 
01211     inherited::makeDeepCopyFromShallowCopy(copies);
01212 
01213     // protected:
01214     varDeepCopyField(rbf_centers, copies);
01215     varDeepCopyField(rbf_sigmas, copies);
01216     varDeepCopyField(junk_prob, copies);
01217     varDeepCopyField(alpha_adaboost,copies);
01218     varDeepCopyField(output, copies);
01219     varDeepCopyField(predicted_input, copies);
01220     deepCopyField(costs, copies);
01221     deepCopyField(penalties, copies);
01222     varDeepCopyField(training_cost, copies);
01223     varDeepCopyField(test_costs, copies);
01224     deepCopyField(invars, copies);
01225     deepCopyField(params, copies);
01226     varDeepCopyField(bag_inputs, copies);
01227     deepCopyField(store_bag_inputs, copies);
01228     varDeepCopyField(bag_size, copies);
01229     deepCopyField(store_bag_size, copies);
01230 
01231     // public:
01232     deepCopyField(paramsvalues, copies);
01233     varDeepCopyField(input, copies);
01234     varDeepCopyField(target, copies);
01235     varDeepCopyField(sampleweight, copies);
01236     varDeepCopyField(w1, copies);
01237     varDeepCopyField(w2, copies);
01238     deepCopyField(v1, copies);
01239     deepCopyField(v2, copies);
01240     varDeepCopyField(wout, copies);
01241     varDeepCopyField(outbias, copies);
01242     varDeepCopyField(wdirect, copies);
01243     varDeepCopyField(wrec, copies);
01244     varDeepCopyField(hidden_layer, copies);
01245     deepCopyField(input_to_output, copies);
01246     deepCopyField(test_costf, copies);
01247     deepCopyField(output_and_target_to_cost, copies);
01248     varDeepCopyField(first_hidden_layer, copies);
01249     deepCopyField(cost_funcs, copies);
01250     deepCopyField(optimizer, copies);
01251 }
01252 
01254 // outputsize //
01256 int NNet::outputsize() const {
01257     return noutputs;
01258 }
01259 
01261 // train //
01263 void NNet::train()
01264 {
01265     // NNet's nstages is the number of epochs (complete passes through the training set),
01266     // while the optimizer's nstages is the number of weight updates.
01267     // The relationship between the two therefore depends on whether we are in stochastic,
01268     // batch or minibatch mode.
01269 
01270     if(!train_set)
01271         PLERROR("In NNet::train - No training set available");
01272 
01273     if (operate_on_bags && n_training_bags < 0) {
01274         // Compute the number of bags in the training set.
01275         int n_train = train_set->length();
01276         PP<ProgressBar> pb = 
01277             report_progress ? new ProgressBar("Counting bags", n_train)
01278                             : NULL;
01279         Vec input, target;
01280         real weight;
01281         n_training_bags = 0;
01282         for (int i = 0; i < n_train; i++) {
01283             train_set->getExample(i, input, target, weight);
01284             if (int(round(target.lastElement()))
01285                 & SumOverBagsVariable::TARGET_COLUMN_FIRST)
01286                 n_training_bags++;
01287             if (pb)
01288                 pb->update(i);
01289         }
01290     }
01291 
01292     if(!train_stats)
01293         setTrainStatsCollector(new VecStatsCollector());
01294     // PLERROR("In NNet::train, you did not setTrainStatsCollector");
01295 
01296     int n_train = operate_on_bags ? n_training_bags
01297                                   : train_set->length();  
01298 
01299     if(input_to_output.isNull())
01300     {
01301         // Net has not been properly built yet (because build was called before the learner had a proper training set)
01302         build();
01303         if (input_to_output.isNull())
01304             PLERROR(
01305                 "NNet::build was not able to properly build the network.\n"
01306                 "Please check that your variables have an appropriate value,\n"
01307                 "that your training set is correctly defined, that its sizes\n"
01308                 "are consistent, that its targetsize is not -1...");
01309     }
01310 
01311     // number of samples seen by optimizer before each optimizer update
01312     int nsamples = batch_size>0 ? batch_size : n_train;
01313     Func paramf = Func(invars, training_cost); // parameterized function to optimize
01314     Var totalcost =
01315         operate_on_bags ? sumOverBags(train_set, paramf, max_bag_size,
01316                                       nsamples, true)
01317                         : meanOf(train_set, paramf, nsamples);
01318     if(optimizer)
01319     {
01320         optimizer->setToOptimize(params, totalcost);  
01321         optimizer->build();
01322     }
01323     else PLERROR("NNet::train can't train without setting an optimizer first!");
01324 
01325     // number of optimizer stages corresponding to one learner stage (one epoch)
01326     int optstage_per_lstage = n_train / nsamples;
01327 
01328     PP<ProgressBar> pb;
01329     if(report_progress)
01330         pb = new ProgressBar("Training " + classname() + " from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
01331 
01332 
01333     // Open/create vmat to save train costs at each epoch.
01334     VMat costs_per_epoch= 0;
01335     if(!expdir.isEmpty())
01336     {
01337         PPath cpe_path= expdir / "NNet_train_costs.pmat";
01338         if(isfile(cpe_path))
01339             costs_per_epoch= new FileVMatrix(cpe_path, true);
01340         else
01341         {
01342             TVec<string> fieldnames(1, "epoch");
01343             fieldnames.append(train_stats->getFieldNames());
01344             costs_per_epoch= new FileVMatrix(cpe_path, 0, fieldnames);
01345         }
01346     }
01347 
01348     int initial_stage = stage;
01349     bool early_stop=false;
01350     while(stage<nstages && !early_stop)
01351     {
01352         optimizer->nstages = optstage_per_lstage;
01353         train_stats->forget();
01354         optimizer->early_stop = false;
01355         early_stop = optimizer->optimizeN(*train_stats);
01356         // optimizer->verifyGradient(1e-6); // Uncomment if you want to check your new Var.
01357         train_stats->finalize();
01358         if(verbosity>2)
01359             pout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
01360         if(costs_per_epoch)
01361         {
01362             Vec v(1, stage);
01363             v.append(train_stats->getMean());
01364             costs_per_epoch->appendRow(v);
01365         }
01366         ++stage;
01367         if(pb)
01368             pb->update(stage-initial_stage);
01369     }
01370     if(verbosity>1)
01371         pout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;
01372 
01373     output_and_target_to_cost->recomputeParents();
01374     test_costf->recomputeParents();
01375     // cerr << "totalcost->value = " << totalcost->value << endl;
01376     // cout << "Result for benchmark is: " << totalcost->value << endl;
01377 }
01378 
01379 } // end of namespace PLearn
01380 
01381 
01382 /*
01383   Local Variables:
01384   mode:c++
01385   c-basic-offset:4
01386   c-file-style:"stroustrup"
01387   c-file-offsets:((innamespace . 0)(inline-open . 0))
01388   indent-tabs-mode:nil
01389   fill-column:79
01390   End:
01391 */
01392 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :