00001 // -*- C++ -*-
00002 
00003 // NatGradNNet.cc
00004 //
00005 // Copyright (C) 2007 Yoshua Bengio
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Yoshua Bengio
00036 
00039 //#include <sstream>  // *stat* for output
00040 #include "NatGradNNet.h"
00041 #include <plearn/math/pl_erf.h>
00042 
00043 namespace PLearn {
00044 using namespace std;
00045 
00046 PLEARN_IMPLEMENT_OBJECT(
00047     NatGradNNet,
00048     "Multi-layer neural network trained with an efficient Natural Gradient optimization",
00049     "A separate covariance matrix is estimated for the gradients associated with the\n"
00050     "input weights of each neuron, and a covariance matrix between the gradients\n"
00051     "on the neurons is also computed. These are combined to obtain an adjusted gradient\n"
00052     "on all the parameters. The class GradientCorrector embodies the adjustment algorithm.\n"
00053     "Users may specify different options for the estimator that is used for correcting\n"
00054     "the neuron gradients and for the estimator that is used for correcting the\n"
00055     "parameter gradients (separately for each neuron).\n"
00056     );
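// A minimal usage sketch (illustrative only; 'trainset' is a placeholder VMat,
// and the options set below are the ones declared in this file as public
// build options):
//
//     PP<NatGradNNet> net = new NatGradNNet();
//     net->noutputs = 10;                  // e.g. a 10-class problem
//     net->hidden_layer_sizes.resize(2);   // two hidden layers...
//     net->hidden_layer_sizes[0] = 100;    // ...of 100 units each
//     net->hidden_layer_sizes[1] = 100;
//     net->output_type = "NLL";
//     net->init_lrate = 0.01;
//     net->minibatch_size = 32;
//     net->setTrainingSet(trainset);       // build_() reads inputsize from the train set
//     net->build();
//     net->train();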
00057 
00058 NatGradNNet::NatGradNNet()
00059     : noutputs(-1),
00060       params_averaging_coeff(1.0),
00061       params_averaging_freq(5),
00062       init_lrate(0.01),
00063       lrate_decay(0),
00064       output_layer_L1_penalty_factor(0.0),
00065       output_layer_lrate_scale(1),
00066       minibatch_size(1),
00067       output_type("NLL"),
00068       input_size_lrate_normalization_power(0),
00069       lrate_scale_factor(3),
00070       lrate_scale_factor_max_power(0),
00071       lrate_scale_factor_min_power(0),
00072       self_adjusted_scaling_and_bias(false),
00073       target_mean_activation(-4),
00074       target_stdev_activation(3), // under a Gaussian assumption, roughly 5% of activations are above 1
00075       //corr_profiling_start(0), 
00076       //corr_profiling_end(0),
00077       n_layers(-1),
00078       cumulative_training_time(0)
00079 {
00080     random_gen = new PRandom();
00081 }
00082 
00083 void NatGradNNet::declareOptions(OptionList& ol)
00084 {
00085     declareOption(ol, "noutputs", &NatGradNNet::noutputs,
00086                   OptionBase::buildoption,
00087                   "Number of outputs of the neural network, which can be derived from  output_type and targetsize_\n");
00088 
00089     declareOption(ol, "n_layers", &NatGradNNet::n_layers,
00090                   OptionBase::learntoption,
00091                   "Number of layers of weights (i.e. 2 for a neural net with one hidden layer).\n"
00092                   "Need not be specified explicitly (derived from hidden_layer_sizes).\n");
00093 
00094     declareOption(ol, "hidden_layer_sizes", &NatGradNNet::hidden_layer_sizes,
00095                   OptionBase::buildoption,
00096                   "Defines the architecture of the multi-layer neural network by\n"
00097                   "specifying the number of hidden units in each hidden layer.\n");
00098 
00099     declareOption(ol, "layer_sizes", &NatGradNNet::layer_sizes,
00100                   OptionBase::learntoption,
00101                   "Derived from hidden_layer_sizes, inputsize_ and noutputs\n");
00102 
00103     declareOption(ol, "cumulative_training_time", &NatGradNNet::cumulative_training_time,
00104                   OptionBase::learntoption,
00105                   "Cumulative training time since age=0, in seconds.\n");
00106 
00107     declareOption(ol, "layer_params", &NatGradNNet::layer_params,
00108                   OptionBase::learntoption,
00109                   "Parameters used while training, for each layer, organized as follows: layer_params[i] \n"
00110                   "is a matrix of dimension layer_sizes[i+1] x (layer_sizes[i]+1)\n"
00111                   "containing the neuron biases in its first column.\n");
00112 
00113     declareOption(ol, "activations_scaling", &NatGradNNet::activations_scaling,
00114                   OptionBase::learntoption,
00115                   "Scaling coefficients for each neuron of each layer, if self_adjusted_scaling_and_bias:\n"
00116                   " output = tanh(activations_scaling[layer][neuron] * (biases[layer][neuron] + weights[layer]*input[layer-1]))\n");
00117 
00118     declareOption(ol, "layer_mparams", &NatGradNNet::layer_mparams,
00119                   OptionBase::learntoption,
00120                   "Test parameters for each layer, organized like layer_params.\n"
00121                   "This is a moving average of layer_params, computed with\n"
00122                   "coefficient params_averaging_coeff. Thus the mparams are\n"
00123                   "a smoothed version of the params, and they are used only\n"
00124                   "during testing.\n");
00125 
00126     declareOption(ol, "params_averaging_coeff", &NatGradNNet::params_averaging_coeff,
00127                   OptionBase::buildoption,
00128                   "Coefficient used to control how fast we forget old parameters\n"
00129                   "in the moving average performed as follows:\n"
00130                   "mparams <-- (1-params_averaging_coeff)*mparams + params_averaging_coeff*params\n");
00131 
00132     declareOption(ol, "params_averaging_freq", &NatGradNNet::params_averaging_freq,
00133                   OptionBase::buildoption,
00134                   "How often (in number of minibatches, i.e. weight updates) we perform\n"
00135                   "the moving average update\n"
00136                   "mparams <-- (1-params_averaging_coeff)*mparams + params_averaging_coeff*params\n");
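    // Illustrative sketch: the moving-average update documented above is
    // performed in train(), every params_averaging_freq minibatches, on the
    // flat parameter vectors of this class:
    //
    //     // mparams <-- (1-c)*mparams + c*params, with c = params_averaging_coeff
    //     multiplyScaledAdd(all_params, 1 - params_averaging_coeff,
    //                       params_averaging_coeff, all_mparams);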
00137 
00138     declareOption(ol, "init_lrate", &NatGradNNet::init_lrate,
00139                   OptionBase::buildoption,
00140                   "Initial learning rate\n");
00141 
00142     declareOption(ol, "lrate_decay", &NatGradNNet::lrate_decay,
00143                   OptionBase::buildoption,
00144                   "Learning rate decay factor\n");
00145 
00146     declareOption(ol, "output_layer_L1_penalty_factor",
00147                   &NatGradNNet::output_layer_L1_penalty_factor,
00148                   OptionBase::buildoption,
00149                   "Optional (default=0) factor of L1 regularization term, i.e.\n"
00150                   "minimize L1_penalty_factor * sum_{ij} |weights(i,j)| during training.\n"
00151                   "Gets multiplied by the learning rate. Applies only to the output layer.\n");
00152 
00153     declareOption(ol, "output_layer_lrate_scale", &NatGradNNet::output_layer_lrate_scale,
00154                   OptionBase::buildoption,
00155                   "Scaling factor of the learning rate for the output layer. Values less than 1\n"
00156                   "mean that the output layer parameters have a smaller learning rate than the others.\n");
00157 
00158     declareOption(ol, "minibatch_size", &NatGradNNet::minibatch_size,
00159                   OptionBase::buildoption,
00160                   "Number of examples between parameter updates (minibatch size).\n"
00161                   "Must be greater than or equal to test_minibatch_size.\n");
00162 
00163     declareOption(ol, "neurons_natgrad_template", &NatGradNNet::neurons_natgrad_template,
00164                   OptionBase::buildoption,
00165                   "Optional template GradientCorrector for the neurons gradient.\n"
00166                   "If not provided, then the natural gradient correction\n"
00167                   "on the neurons gradient is not performed.\n");
00168 
00169     declareOption(ol, "neurons_natgrad_per_layer", 
00170                   &NatGradNNet::neurons_natgrad_per_layer,
00171                   OptionBase::learntoption,
00172                   "Vector of GradientCorrector objects for the gradient on the neurons of each layer.\n"
00173                   "They are copies of the neurons_natgrad_template provided by the user.\n");
00174 
00175     declareOption(ol, "params_natgrad_template", 
00176                   &NatGradNNet::params_natgrad_template,
00177                   OptionBase::buildoption,
00178                   "Optional template GradientCorrector object for the gradient of the parameters inside each neuron.\n"
00179                   "It is replicated in the params_natgrad_per_group vector, for each neuron.\n"
00180                   "If not provided, then the neuron-specific natural gradient estimator is not used.\n");
00181 
00182     declareOption(ol, "params_natgrad_per_input_template",
00183                   &NatGradNNet::params_natgrad_per_input_template,
00184                   OptionBase::buildoption,
00185                   "Optional template GradientCorrector object for the gradient of the parameters of the first layer\n"
00186                   "grouped based upon their input. It is replicated in the params_natgrad_per_group vector, for each group.\n"
00187                   "If provided, overrides the params_natgrad_template for the parameters of the first layer.\n");
00188 
00189     declareOption(ol, "params_natgrad_per_group", 
00190                     &NatGradNNet::params_natgrad_per_group,
00191                     OptionBase::learntoption,
00192                     "Vector of GradientCorrector objects for the gradient inside groups of parameters.\n"
00193                     "They are copies of the params_natgrad_template and params_natgrad_per_input_template\n"
00194                     "templates provided by the user.\n");
00195 
00196     declareOption(ol, "full_natgrad", &NatGradNNet::full_natgrad,
00197                   OptionBase::buildoption,
00198                   "GradientCorrector for all the parameter gradients simultaneously.\n"
00199                   "This should not be set if neurons_natgrad_template or params_natgrad_template\n"
00200                   "is provided. If none of the GradientCorrectors is provided, then\n"
00201                   "regular stochastic gradient is performed.\n");
00202 
00203     declareOption(ol, "output_type", 
00204                   &NatGradNNet::output_type,
00205                   OptionBase::buildoption,
00206                   "Type of output cost: 'NLL' for classification problems (noutputs>=1),\n"
00207                   "'cross_entropy' for binary classification (noutputs==1),"
00208                   " or 'MSE' for regression.\n");
00210 
00211     declareOption(ol, "input_size_lrate_normalization_power", 
00212                   &NatGradNNet::input_size_lrate_normalization_power, 
00213                   OptionBase::buildoption,
00214                   "Scale the learning rate neuron-wise (or layer-wise actually, here):\n"
00215                   "-1 scales by 1 / ||x||^2, where x is the 1-extended input vector of the neuron\n"
00216                   "-2 scales by 1 / ||x||\n"
00217                   "0 does not scale the learning rate\n"
00218                   "1 scales it by 1 / the nb of inputs of the neuron\n"
00219                   "2 scales it by 1 / sqrt(the nb of inputs of the neuron), etc.\n");
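    // Worked example, per the description above: with
    // input_size_lrate_normalization_power==2 and a neuron fed by 100 inputs
    // plus the bias (fan-in of 101), the learning rate of that layer is
    // divided by sqrt(101), i.e. roughly by 10.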
00219 
00220     declareOption(ol, "lrate_scale_factor",
00221                   &NatGradNNet::lrate_scale_factor,
00222                   OptionBase::buildoption,
00223                   "Scale the learning rate in different neurons by a factor\n"
00224                   "taken randomly as follows: choose an integer n uniformly between\n"
00225                   "lrate_scale_factor_min_power and lrate_scale_factor_max_power\n"
00226                   "inclusive, and then scale the learning rate by lrate_scale_factor^n.\n");
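    // Worked example, per the description above: with lrate_scale_factor==3,
    // lrate_scale_factor_min_power==-1 and lrate_scale_factor_max_power==1,
    // each neuron's learning rate is multiplied by 3^n with n drawn uniformly
    // from {-1,0,1}, i.e. by one of {1/3, 1, 3}.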
00227 
00228     declareOption(ol, "lrate_scale_factor_max_power",
00229                   &NatGradNNet::lrate_scale_factor_max_power,
00230                   OptionBase::buildoption,
00231                   "See help on lrate_scale_factor\n");
00232 
00233     declareOption(ol, "lrate_scale_factor_min_power",
00234                   &NatGradNNet::lrate_scale_factor_min_power,
00235                   OptionBase::buildoption,
00236                   "See help on lrate_scale_factor\n");
00237 
00238     declareOption(ol, "self_adjusted_scaling_and_bias",
00239                   &NatGradNNet::self_adjusted_scaling_and_bias,
00240                   OptionBase::buildoption,
00241                   "If true, let each neuron self-adjust its bias and scaling factor\n"
00242                   "of its activations so that the mean and standard deviation of the\n"
00243                   "activations reach the target_mean_activation and target_stdev_activation.\n"
00244                   "The activations mean and variance are estimated by a moving average with\n"
00245                   "coefficient given by activation_statistics_moving_average_coefficient.\n");
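    // Rough sketch of the per-neuron self-adjustment performed in fpropNet()
    // during training; here m and v denote the running mean and variance of
    // the activation a, and coef stands for
    // activation_statistics_moving_average_coefficient:
    //
    //     v = (1-coef)*v + coef*(a-m)*(a-m);            // running variance
    //     m = (1-coef)*m + coef*a;                      // running mean
    //     bias    = target_mean_activation - m;
    //     scaling = target_stdev_activation / sqrt(v);  // unless v is very large
    //     output  = tanh(scaling * (a + bias));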
00246 
00247     declareOption(ol, "target_mean_activation",
00248                   &NatGradNNet::target_mean_activation,
00249                   OptionBase::buildoption,
00250                   "See help on self_adjusted_scaling_and_bias\n");
00251 
00252     declareOption(ol, "target_stdev_activation",
00253                   &NatGradNNet::target_stdev_activation,
00254                   OptionBase::buildoption,
00255                   "See help on self_adjusted_scaling_and_bias\n");
00256 
00257     declareOption(ol, "activation_statistics_moving_average_coefficient",
00258                   &NatGradNNet::activation_statistics_moving_average_coefficient,
00259                   OptionBase::buildoption,
00260                   "The activations mean and variance used for self_adjusted_scaling_and_bias\n"
00261                   "are estimated by a moving average with this coefficient:\n"
00262                   "   xbar <-- coefficient * xbar + (1-coefficient) * x\n"
00263                   "where x could be the activation or its square\n");
00264 
00265     //declareOption(ol, "corr_profiling_start",
00266     //              &NatGradNNet::corr_profiling_start,
00267     //              OptionBase::buildoption,
00268     //              "Stage to start the profiling of the gradients' and the\n"
00269     //              "natural gradients' correlation.\n");
00270 
00271     //declareOption(ol, "corr_profiling_end",
00272     //              &NatGradNNet::corr_profiling_end,
00273     //              OptionBase::buildoption,
00274     //              "Stage to end the profiling of the gradients' and the\n"
00275     //              "natural gradients' correlations.\n");
00276 
00277     // Now call the parent class' declareOptions
00278     inherited::declareOptions(ol);
00279 }
00280 
00281 void NatGradNNet::build_()
00282 {
00283     if (!train_set)
00284         return;
00285     inputsize_ = train_set->inputsize();
00286     if (output_type=="MSE")
00287     {
00288         if (noutputs<0) noutputs = targetsize_;
00289         else PLASSERT_MSG(noutputs==targetsize_,"NatGradNNet: noutputs should be -1 or match data's targetsize");
00290     }
00291     else if (output_type=="NLL")
00292     {
00293         if (noutputs<0)
00294             PLERROR("NatGradNNet: if output_type=NLL (classification), one \n"
00295                     "should provide noutputs = number of classes, or possibly\n"
00296                     "1 when there are only 2 classes.\n");
00297     }
00298     else if (output_type=="cross_entropy")
00299     {
00300         if(noutputs!=1)
00301             PLERROR("NatGradNNet: if output_type=cross_entropy, then \n"
00302                     "noutputs should be 1.\n");
00303     }
00304     else PLERROR("NatGradNNet: output_type should be cross_entropy, NLL or MSE\n");
00305 
00306     if( output_layer_L1_penalty_factor < 0. )
00307         PLWARNING("NatGradNNet::build_ - output_layer_L1_penalty_factor is negative!\n");
00308 
00309     while (hidden_layer_sizes.length()>0 && hidden_layer_sizes[hidden_layer_sizes.length()-1]==0)
00310         hidden_layer_sizes.resize(hidden_layer_sizes.length()-1);
00311     n_layers = hidden_layer_sizes.length()+2;
00312     layer_sizes.resize(n_layers);
00313     layer_sizes.subVec(1,n_layers-2) << hidden_layer_sizes;
00314     layer_sizes[0]=inputsize_;
00315     layer_sizes[n_layers-1]=noutputs;
00316     layer_params.resize(n_layers-1);
00317     layer_mparams.resize(n_layers-1);
00318     layer_params_delta.resize(n_layers-1);
00319     layer_params_gradient.resize(n_layers-1);
00320     biases.resize(n_layers-1);
00321     activations_scaling.resize(n_layers-1);
00322     weights.resize(n_layers-1);
00323     mweights.resize(n_layers-1);
00324     mean_activations.resize(n_layers-1);
00325     var_activations.resize(n_layers-1);
00326     int n_neurons=0;
00327     int n_params=0;
00328     for (int i=0;i<n_layers-1;i++)
00329     {
00330         n_neurons+=layer_sizes[i+1];
00331         n_params+=layer_sizes[i+1]*(1+layer_sizes[i]);
00332     }
00333     all_params.resize(n_params);
00334     all_mparams.resize(n_params);
00335     all_params_gradient.resize(n_params);
00336     all_params_delta.resize(n_params);
00337     //all_params_cum_gradient.resize(n_params); // *stat*
00338 
00339     // depending on how parameters are grouped on the first layer
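    // (with per-input grouping, the first layer contributes layer_sizes[0]+1
    // groups, one per input plus the bias, instead of its layer_sizes[1]
    // neuron groups)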
00340     int n_groups = params_natgrad_per_input_template ? (n_neurons-layer_sizes[1]+layer_sizes[0]+1) : n_neurons;
00341     group_params.resize(n_groups);
00342     group_params_delta.resize(n_groups);
00343     group_params_gradient.resize(n_groups);
00344 
00345     for (int i=0,k=0,p=0;i<n_layers-1;i++)
00346     {
00347         int np=layer_sizes[i+1]*(1+layer_sizes[i]);
00348         // First layer has natural gradient applied on groups of parameters
00349         // linked to the same input -> parameters must be stored TRANSPOSED!
00350         if( i==0 && params_natgrad_per_input_template ) {
00351             layer_params[i]=all_params.subVec(p,np).toMat(layer_sizes[i]+1,layer_sizes[i+1]);
00352             layer_mparams[i]=all_mparams.subVec(p,np).toMat(layer_sizes[i]+1,layer_sizes[i+1]);
00353             biases[i]=layer_params[i].subMatRows(0,1);
00354             weights[i]=layer_params[i].subMatRows(1,layer_sizes[i]); //weights[0] from layer 0 to layer 1
00355             mweights[i]=layer_mparams[i].subMatRows(1,layer_sizes[i]); //weights[0] from layer 0 to layer 1
00356             layer_params_gradient[i]=all_params_gradient.subVec(p,np).toMat(layer_sizes[i]+1,layer_sizes[i+1]);
00357             layer_params_delta[i]=all_params_delta.subVec(p,np);
00358             for (int j=0;j<layer_sizes[i]+1;j++,k++)   // include a bias input 
00359             {
00360                 group_params[k]=all_params.subVec(p,layer_sizes[i+1]);
00361                 group_params_delta[k]=all_params_delta.subVec(p,layer_sizes[i+1]);
00362                 group_params_gradient[k]=all_params_gradient.subVec(p,layer_sizes[i+1]);
00363                 p+=layer_sizes[i+1];
00364             }
00365         // Usual parameter storage
00366         }   else    {
00367             layer_params[i]=all_params.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00368             layer_mparams[i]=all_mparams.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00369             biases[i]=layer_params[i].subMatColumns(0,1);
00370             weights[i]=layer_params[i].subMatColumns(1,layer_sizes[i]); // weights[0] from layer 0 to layer 1
00371             mweights[i]=layer_mparams[i].subMatColumns(1,layer_sizes[i]); // weights[0] from layer 0 to layer 1
00372             layer_params_gradient[i]=all_params_gradient.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00373             layer_params_delta[i]=all_params_delta.subVec(p,np);
00374             for (int j=0;j<layer_sizes[i+1];j++,k++)
00375             {
00376                 group_params[k]=all_params.subVec(p,1+layer_sizes[i]);
00377                 group_params_delta[k]=all_params_delta.subVec(p,1+layer_sizes[i]);
00378                 group_params_gradient[k]=all_params_gradient.subVec(p,1+layer_sizes[i]);
00379                 p+=1+layer_sizes[i];
00380             }
00381         }
00382         activations_scaling[i].resize(layer_sizes[i+1]);
00383         mean_activations[i].resize(layer_sizes[i+1]);
00384         var_activations[i].resize(layer_sizes[i+1]);
00385     }
00386     if (params_natgrad_template || params_natgrad_per_input_template)
00387     {
00388         int n_input_groups=0;
00389         int n_neuron_groups=0;
00390         if(params_natgrad_template)
00391             n_neuron_groups = n_neurons;
00392         if( params_natgrad_per_input_template ) {
00393             n_input_groups = layer_sizes[0]+1;
00394             if(params_natgrad_template) // override first layer groups if present
00395                 n_neuron_groups -= layer_sizes[1];
00396         }
00397         params_natgrad_per_group.resize(n_input_groups+n_neuron_groups);
00398         for (int i=0;i<n_input_groups;i++)
00399             params_natgrad_per_group[i] = PLearn::deepCopy(params_natgrad_per_input_template);
00400         for (int i=n_input_groups; i<n_input_groups+n_neuron_groups;i++)
00401             params_natgrad_per_group[i] = PLearn::deepCopy(params_natgrad_template);
00402     }
00403     if (neurons_natgrad_template && neurons_natgrad_per_layer.length()==0)
00404     {
00405         neurons_natgrad_per_layer.resize(n_layers); // 0 not used
00406         for (int i=1;i<n_layers;i++) // no need for correcting input layer
00407             neurons_natgrad_per_layer[i] = PLearn::deepCopy(neurons_natgrad_template);
00408     }
00409     neuron_gradients.resize(minibatch_size,n_neurons);
00410     neuron_outputs_per_layer.resize(n_layers); // layer 0 = input, layer n_layers-1 = output
00411     neuron_extended_outputs_per_layer.resize(n_layers); // layer 0 = input, layer n_layers-1 = output
00412     neuron_gradients_per_layer.resize(n_layers); // layer 0 not used
00413     neuron_extended_outputs_per_layer[0].resize(minibatch_size,1+layer_sizes[0]);
00414     neuron_outputs_per_layer[0]=neuron_extended_outputs_per_layer[0].subMatColumns(1,layer_sizes[0]);
00415     neuron_extended_outputs_per_layer[0].column(0).fill(1.0); // for biases
00416     for (int i=1,k=0;i<n_layers;k+=layer_sizes[i],i++)
00417     {
00418         neuron_extended_outputs_per_layer[i].resize(minibatch_size,1+layer_sizes[i]);
00419         neuron_outputs_per_layer[i]=neuron_extended_outputs_per_layer[i].subMatColumns(1,layer_sizes[i]);
00420         neuron_extended_outputs_per_layer[i].column(0).fill(1.0); // for biases
00421         neuron_gradients_per_layer[i] = 
00422             neuron_gradients.subMatColumns(k,layer_sizes[i]);
00423     }
00424     example_weights.resize(minibatch_size);
00425     TVec<string> train_cost_names = getTrainCostNames() ;
00426     train_costs.resize(minibatch_size,train_cost_names.length()-2 );
00427 
00428     Profiler::activate();
00429 
00430     // Gradient correlation profiling
00431     //if( corr_profiling_start != corr_profiling_end )  {
00432     //    PLASSERT( (0<=corr_profiling_start) && (corr_profiling_start<corr_profiling_end) );
00433     //    cout << "n_params " << n_params << endl;
00434     //    // Build the names.
00435     //    stringstream ss_suffix;
00436     //    for (int i=0;i<n_layers;i++)    {
00437     //        ss_suffix << "_" << layer_sizes[i];
00438     //    }
00439     //    ss_suffix << "_stages_" << corr_profiling_start << "_" << corr_profiling_end;
00440     //    string str_gc_name = "gCcorr" + ss_suffix.str();
00441     //    string str_ngc_name;
00442     //    if( full_natgrad )  {
00443     //        str_ngc_name = "ngCcorr_full" + ss_suffix.str();
00444     //    }   else if (params_natgrad_template)   {
00445     //        str_ngc_name = "ngCcorr_params" + ss_suffix.str();
00446     //    }
00447     //    // Build the profilers.
00448     //    g_corrprof = new CorrelationProfiler( n_params, str_gc_name);
00449     //    g_corrprof->build();
00450     //    ng_corrprof = new CorrelationProfiler( n_params, str_ngc_name);
00451     //    ng_corrprof->build();
00452     //}
00453 
00454 }
00455 
00456 // ### Nothing to add here, simply calls build_
00457 void NatGradNNet::build()
00458 {
00459     inherited::build();
00460     build_();
00461 }
00462 
00463 
00464 void NatGradNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00465 {
00466     inherited::makeDeepCopyFromShallowCopy(copies);
00467 
00468     deepCopyField(hidden_layer_sizes, copies);
00469     deepCopyField(layer_params, copies);
00470     deepCopyField(layer_mparams, copies);
00471     deepCopyField(biases, copies);
00472     deepCopyField(weights, copies);
00473     deepCopyField(mweights, copies);
00474     deepCopyField(activations_scaling, copies);
00475     deepCopyField(neurons_natgrad_template, copies);
00476     deepCopyField(neurons_natgrad_per_layer, copies);
00477     deepCopyField(params_natgrad_template, copies);
00478     deepCopyField(params_natgrad_per_input_template, copies);
00479     deepCopyField(params_natgrad_per_group, copies);
00480     deepCopyField(full_natgrad, copies);
00481     deepCopyField(layer_sizes, copies);
00482     deepCopyField(targets, copies);
00483     deepCopyField(example_weights, copies);
00484     deepCopyField(train_costs, copies);
00485     deepCopyField(neuron_outputs_per_layer, copies);
00486     deepCopyField(neuron_extended_outputs_per_layer, copies);
00487     deepCopyField(all_params, copies);
00488     deepCopyField(all_mparams, copies);
00489     deepCopyField(all_params_gradient, copies);
00490     deepCopyField(layer_params_gradient, copies);
00491     deepCopyField(neuron_gradients, copies);
00492     deepCopyField(neuron_gradients_per_layer, copies);
00493     deepCopyField(all_params_delta, copies);
00494     deepCopyField(group_params, copies);
00495     deepCopyField(group_params_gradient, copies);
00496     deepCopyField(group_params_delta, copies);
00497     deepCopyField(layer_params_delta, copies);
00498 
00499 /*
00500     deepCopyField(, copies);
00501 */
00502 }
00503 
00504 
00505 int NatGradNNet::outputsize() const
00506 {
00507     return noutputs;
00508 }
00509 
00510 void NatGradNNet::forget()
00511 {
00515     inherited::forget();
00516     for (int i=0;i<n_layers-1;i++)
00517     {
00518         real delta = 1/sqrt(real(layer_sizes[i]));
00519         random_gen->fill_random_uniform(weights[i],-delta,delta);
00520         biases[i].clear();
00521         activations_scaling[i].fill(1.0);
00522         mean_activations[i].clear();
00523         var_activations[i].fill(1.0);
00524     }
00525     stage = 0;
00526     cumulative_training_time=0;
00527     if (params_averaging_coeff!=1.0)
00528         all_mparams << all_params;
00529     
00530     // *stat*
00531     /*if( pa_gradstats.length() == 0 )    {
00532         pa_gradstats.resize(noutputs);
00533         for(int i=0; i<noutputs; i++)   {
00534             (pa_gradstats[i]).compute_covariance = true;
00535         }
00536     }   else    {
00537         for(int i=0; i<noutputs; i++)   {
00538             (pa_gradstats[i]).forget();
00539         }
00540     }*/
00541 
00542 }
00543 
00544 void NatGradNNet::train()
00545 {
00546 
00547     if (inputsize_<0)
00548         build();
00549 
00550     targets.resize(minibatch_size,targetsize());  // the train_set's targetsize()
00551 
00552     if(!train_set)
00553         PLERROR("In NatGradNNet::train, you did not setTrainingSet");
00554     
00555     if(!train_stats)
00556         setTrainStatsCollector(new VecStatsCollector());
00557 
00558     train_costs.fill(MISSING_VALUE) ;
00559 
00560     train_stats->forget();
00561 
00562     PP<ProgressBar> pb;
00563 
00564     Profiler::reset("training");
00565     Profiler::start("training");
00566     Profiler::pl_profile_start("Totaltraining");
00567     if( report_progress && stage < nstages )
00568         pb = new ProgressBar( "Training "+classname(),
00569                               nstages - stage );
00570     int start_stage=stage;
00571 
00572     Vec costs_plus_time(train_costs.width()+2);
00573     costs_plus_time[train_costs.width()] = MISSING_VALUE;
00574     costs_plus_time[train_costs.width()+1] = MISSING_VALUE;
00575     Vec costs = costs_plus_time.subVec(0,train_costs.width());
00576     int nsamples = train_set->length();
00577 
00578     // *stat* - Need some stats for grad analysis
00579     //sum_gradient_norms = 0.0;
00580     //all_params_cum_gradient.fill(0.0);
00581     
00582     for( ; stage<nstages; stage++)
00583     {
00584         int sample = stage % nsamples;
00585         int b = stage % minibatch_size;
00586         Vec input = neuron_outputs_per_layer[0](b);
00587         Vec target = targets(b);
00588         Profiler::pl_profile_start("NatGradNNet::getting_data");
00589         train_set->getExample(sample, input, target, example_weights[b]);
00590         Profiler::pl_profile_end("NatGradNNet::getting_data");
00591         if (b+1==minibatch_size) // TODO: also handle the special end case stage+1==nstages
00592         {
00593             onlineStep(stage, targets, train_costs, example_weights );
00594             for (int i=0;i<minibatch_size;i++)
00595             {
00596                 costs << train_costs(i); // costs of the i-th example of the minibatch
00597                 train_stats->update( costs_plus_time );
00598                 
00599             }
00600         }
00601         if (params_averaging_coeff!=1.0 && 
00602             b==minibatch_size-1 && 
00603             (stage+1)%(minibatch_size*params_averaging_freq)==0)
00604             multiplyScaledAdd(all_params, 1-params_averaging_coeff,
00605                               params_averaging_coeff, all_mparams);
00606         if( pb )
00607             pb->update( stage + 1 - start_stage);
00608 
00609         // *stat*
00610         //(pa_gradstats[(int)targets(0,0)]).update( all_params_gradient );
00611 
00612     }
00613     Profiler::end("training");
00614     Profiler::pl_profile_end("Totaltraining");
00615     if (verbosity>0)
00616         Profiler::report(cout);
00617     const Profiler::Stats& stats = Profiler::getStats("training");
00618     costs.fill(MISSING_VALUE);
00619     real ticksPerSec = Profiler::ticksPerSecond();
00620     real cpu_time = (stats.user_duration+stats.system_duration)/ticksPerSec;
00621     cumulative_training_time += cpu_time;
00622     costs_plus_time[train_costs.width()] = cpu_time;
00623     costs_plus_time[train_costs.width()+1] = cumulative_training_time;
00624     train_stats->update( costs_plus_time );
00625     train_stats->finalize(); // finalize statistics for this epoch
00626 
00627     // *stat*
00628     // profiling gradient correlation
00629     //if( g_corrprof )    {
00630     //    PLASSERT( corr_profiling_end <= nstages );
00631     //    g_corrprof->printAndReset();
00632     //    ng_corrprof->printAndReset();
00633     //}
00634 
00635     // *stat* - Need some stats for grad analysis
00636     // The SGrad stats include the learning rate.
00637     //cout << "sum_gradient_norms " << sum_gradient_norms 
00638     //     << " norm(all_params_cum_gradient,2.0) " << norm(all_params_cum_gradient,2.0) << endl;
00639 
00640     // *stat*
00641     //for(int i=0; i<noutputs; i++)   {
00642     //    ofstream fd_cov;
00643     //    stringstream ss;
00644     //    ss << "cov" << i+1 << ".txt";
00645     //    fd_cov.open(ss.str().c_str());
00646     //    fd_cov << (pa_gradstats[i]).getCovariance();
00647     //    fd_cov.close();
00648     //}
00649     
00650 
00651 }
00652 
00653 void NatGradNNet::onlineStep(int t, const Mat& targets,
00654                              Mat& train_costs, Vec example_weights)
00655 {
00656     // mean gradient over minibatch_size examples has less variance, can afford larger learning rate
00657     real lrate = sqrt(real(minibatch_size))*init_lrate/(1 + t*lrate_decay);
00658     PLASSERT(targets.length()==minibatch_size && train_costs.length()==minibatch_size && example_weights.length()==minibatch_size);
00659     fpropNet(minibatch_size,true);
00660     fbpropLoss(neuron_outputs_per_layer[n_layers-1],targets,example_weights,train_costs);
00661     for (int i=n_layers-1;i>0;i--)
00662     {
00663         // here neuron_gradients_per_layer[i] contains the gradient on activations (weighted sums)
00664         //      (minibatch_size x layer_size[i])
00665 
00666         Mat previous_neurons_gradient = neuron_gradients_per_layer[i-1];
00667         Mat next_neurons_gradient = neuron_gradients_per_layer[i];
00668         Mat previous_neurons_output = neuron_outputs_per_layer[i-1];
00669         real layer_lrate_factor = (i==n_layers-1)?output_layer_lrate_scale:1;
00670         if (self_adjusted_scaling_and_bias && i+1<n_layers-1)
00671             for (int k=0;k<minibatch_size;k++)
00672             {
00673                 Vec g=next_neurons_gradient(k);
00674                 g*=activations_scaling[i-1]; // pass gradient through scaling
00675             }
00676         if (input_size_lrate_normalization_power==-1)
00677             layer_lrate_factor /= sumsquare(neuron_extended_outputs_per_layer[i-1]);
00678         else if (input_size_lrate_normalization_power==-2)
00679             layer_lrate_factor /= sqrt(sumsquare(neuron_extended_outputs_per_layer[i-1]));
00680         else if (input_size_lrate_normalization_power!=0)
00681         {
00682             int fan_in = neuron_extended_outputs_per_layer[i-1].width(); // 1 + layer_sizes[i-1], i.e. the nb of inputs of the neuron (incl. bias)
00683             if (input_size_lrate_normalization_power==1)
00684                 layer_lrate_factor/=fan_in;
00685             else if (input_size_lrate_normalization_power==2)
00686                 layer_lrate_factor/=sqrt(real(fan_in));
00687             else layer_lrate_factor/=pow(fan_in,1.0/input_size_lrate_normalization_power);
00688         }
00689         // optionally correct the gradient on neurons using their covariance
00690         if (neurons_natgrad_template && neurons_natgrad_per_layer[i])
00691         {
00692             static Vec tmp;
00693             tmp.resize(layer_sizes[i]);
00694             for (int k=0;k<minibatch_size;k++)
00695             {
00696                 Vec g_k = next_neurons_gradient(k);
00697                 (*neurons_natgrad_per_layer[i])(t-minibatch_size+1+k,g_k,tmp);
00698                 g_k << tmp;
00699             }
00700         }
00701         if (i>1) // compute gradient on previous layer
00702         {
00703             // propagate gradients
00704             Profiler::pl_profile_start("ProducScaleAccOnlineStep");
00705             productScaleAcc(previous_neurons_gradient,next_neurons_gradient,false,
00706                             weights[i-1],false,1,0);
00707             Profiler::pl_profile_end("ProducScaleAccOnlineStep");
00708             // propagate through tanh non-linearity
00709             for (int j=0;j<previous_neurons_gradient.length();j++)
00710             {
00711                 real* grad = previous_neurons_gradient[j];
00712                 real* out = previous_neurons_output[j];
00713                 for (int k=0;k<previous_neurons_gradient.width();k++,out++)
00714                     grad[k] *= (1 - *out * *out); // gradient through tanh derivative
00715             }
00716         }
00717         // compute gradient on parameters, possibly update them
00718         if (full_natgrad || params_natgrad_template || params_natgrad_per_input_template) 
00719         {
00720 //alternate
00721             if( params_natgrad_per_input_template && i==1 ){ // parameters are transposed
00722                 Profiler::pl_profile_start("ProducScaleAccOnlineStep");
00723                 productScaleAcc(layer_params_gradient[i-1],
00724                             neuron_extended_outputs_per_layer[i-1], true,
00725                             next_neurons_gradient, false, 
00726                             1, 0);
00727                 Profiler::pl_profile_end("ProducScaleAccOnlineStep");
00728             }else{
00729                 Profiler::pl_profile_start("ProducScaleAccOnlineStep");
00730                 productScaleAcc(layer_params_gradient[i-1],next_neurons_gradient,true,
00731                             neuron_extended_outputs_per_layer[i-1],false,1,0);
00732                 Profiler::pl_profile_end("ProducScaleAccOnlineStep");
00733             }
00734             layer_params_gradient[i-1] *= 1.0/minibatch_size; // use the MEAN gradient
00735         } else {// just regular stochastic gradient
00736             // compute gradient on weights and update them in one go (more efficient)
00737             // mean gradient has less variance, can afford larger learning rate
00738             Profiler::pl_profile_start("ProducScaleAccOnlineStep");
00739             productScaleAcc(layer_params[i-1],next_neurons_gradient,true,
00740                             neuron_extended_outputs_per_layer[i-1],false,
00741                             -layer_lrate_factor*lrate/minibatch_size,1);
00742             Profiler::pl_profile_end("ProducScaleAccOnlineStep");
00743 
00744             // Don't do the stochastic trick - remember the gradient times its
00745             // learning rate
00746             /*productScaleAcc(layer_params_gradient[i-1],next_neurons_gradient,true,
00747                             neuron_extended_outputs_per_layer[i-1],false,
00748                             -layer_lrate_factor*lrate/minibatch_size,0);
00749             layer_params[i-1] += layer_params_gradient[i-1];*/
00750   
00751             // *stat* - compute and store the gradient
00752             /*productScaleAcc(layer_params_gradient[i-1],next_neurons_gradient,true,
00753                             neuron_extended_outputs_per_layer[i-1],false,
00754                             1,0);*/
00755         }
00756     }
00757     if (full_natgrad)
00758     {
00759         (*full_natgrad)(t/minibatch_size,all_params_gradient,all_params_delta); // compute update direction by natural gradient
00760         if (output_layer_lrate_scale!=1.0)
00761             layer_params_delta[n_layers-2] *= output_layer_lrate_scale; // scale output layer's learning rate
00762         multiplyAcc(all_params,all_params_delta,-lrate); // update
00763         // Hack to apply batch gradient even in this case (used for profiling
00764         // the gradient correlations)
00765         //if (output_layer_lrate_scale!=1.0)
00766         //      layer_params_gradient[n_layers-2] *= output_layer_lrate_scale; // scale output layer's learning rate
00767         //  multiplyAcc(all_params,all_params_gradient,-lrate); // update
00768 
00769     } else if (params_natgrad_template || params_natgrad_per_input_template)
00770     {
00771         for (int i=0;i<params_natgrad_per_group.length();i++)
00772         {
00773             GradientCorrector& neuron_natgrad = *(params_natgrad_per_group[i]);
00774             neuron_natgrad(t/minibatch_size,group_params_gradient[i],group_params_delta[i]); // compute update direction by natural gradient
00775         }
00776 //alternate
00777         if (output_layer_lrate_scale!=1.0)
00778             layer_params_delta[n_layers-2] *= output_layer_lrate_scale; // scale output layer's learning rate 
00779         multiplyAcc(all_params,all_params_delta,-lrate); // update
00780     }
00781 
00782     // Output layer L1 regularization
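    // (This is a soft-thresholding step: each output-layer weight is shrunk
    // towards 0 by L1_delta = lrate * output_layer_L1_penalty_factor, and set
    // to 0 if it would cross zero.)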
00783     if( output_layer_L1_penalty_factor != 0. )    {
00784         real L1_delta = lrate * output_layer_L1_penalty_factor;
00785         real* m_i = layer_params[n_layers-2].data();
00786 
00787         for(int i=0; i<layer_params[n_layers-2].length(); i++,m_i+=layer_params[n_layers-2].mod())  {
00788             for(int j=0; j<layer_params[n_layers-2].width(); j++)   {
00789                 if( m_i[j] > L1_delta )
00790                     m_i[j] -= L1_delta;
00791                 else if( m_i[j] < -L1_delta )
00792                     m_i[j] += L1_delta;
00793                 else
00794                     m_i[j] = 0.;
00795             }
00796         }
00797     }
00798 
00799     // profiling gradient correlation
00800     //if( (t>=corr_profiling_start) && (t<=corr_profiling_end) && g_corrprof )    {
00801     //    (*g_corrprof)(all_params_gradient);
00802     //    (*ng_corrprof)(all_params_delta);
00803     //}
00804 
00805     // temporary - Need some stats for pvgrad analysis
00806     // SGrad stats. This includes the learning rate.
00807     /*if( ! use_pvgrad )  {
00808         sum_gradient_norms += norm(all_params_gradient,2.0);
00809         all_params_cum_gradient += all_params_gradient;
00810     }*/
00811 
00812 
00813     // Output for profiling: weights
00814     // horribly inefficient! Anyway the Mat output is done one number at a
00815     // time...
00816     // do it locally, say on /part/01/Tmp
00817 /*    ofstream fd_params;
00818     fd_params.open("params.txt", ios::app);
00819     fd_params << layer_params[0](0) << " " << layer_params[1](0) << endl;
00820     fd_params.close();
00821 
00822     ofstream fd_gradients;
00823     fd_gradients.open("gradients.txt", ios::app);
00824     //fd_gradients << all_params_gradient << endl;
00825     fd_gradients << layer_params_gradient[0](0) << " " <<layer_params_gradient[1](0) << endl;
00826     fd_gradients.close();
00827 */
00828 }
00829 
00830 void NatGradNNet::computeOutput(const Vec& input, Vec& output) const
00831 {
00832     Profiler::pl_profile_start("NatGradNNet::computeOutput");
00833     neuron_outputs_per_layer[0](0) << input;
00834     fpropNet(1,false);
00835     output << neuron_outputs_per_layer[n_layers-1](0);
00836     Profiler::pl_profile_end("NatGradNNet::computeOutput");
00837 }
00838 
00840 void NatGradNNet::fpropNet(int n_examples, bool during_training) const
00841 {
00842     PLASSERT_MSG(n_examples<=minibatch_size,"NatGradNNet::fpropNet: the number of input vectors processed should be <= minibatch_size\n");
00843     for (int i=0;i<n_layers-1;i++)
00844     {
00845         Mat prev_layer = (self_adjusted_scaling_and_bias && i+1<n_layers-1)?
00846             neuron_outputs_per_layer[i]:neuron_extended_outputs_per_layer[i];
00847         Mat next_layer = neuron_outputs_per_layer[i+1];
00848         if (n_examples!=minibatch_size)
00849         {
00850             prev_layer = prev_layer.subMatRows(0,n_examples);
00851             next_layer = next_layer.subMatRows(0,n_examples);
00852         }
00853 //alternate
00854         // Are the input weights transposed? (because of ...)
00855         bool tw = true;
00856         if( params_natgrad_per_input_template && i==0 )
00857             tw = false;
00858 
00859         // try to use BLAS for the expensive operation
00860         if (self_adjusted_scaling_and_bias && i+1<n_layers-1){
00861             if (during_training)
00862                 Profiler::pl_profile_start("ProducScaleAccFpropTrain");
00863             else
00864                 Profiler::pl_profile_start("ProducScaleAccFpropNoTrain");
00865             productScaleAcc(next_layer, prev_layer, false, 
00866                             (during_training || params_averaging_coeff==1.0)?
00867                             weights[i]:mweights[i], 
00868                             tw, 1, 0);
00869             if (during_training)
00870                 Profiler::pl_profile_end("ProducScaleAccFpropTrain");
00871             else
00872                 Profiler::pl_profile_end("ProducScaleAccFpropNoTrain"); // must match the start label above
00873         }else{
00874             if (during_training)
00875                 Profiler::pl_profile_start("ProducScaleAccFpropTrain");
00876             else
00877                 Profiler::pl_profile_start("ProducScaleAccFpropNoTrain");
00878             productScaleAcc(next_layer, prev_layer, false, 
00879                             (during_training || params_averaging_coeff==1.0)?
00880                             layer_params[i]:layer_mparams[i], 
00881                             tw, 1, 0);
00882             if (during_training)
00883                 Profiler::pl_profile_end("ProducScaleAccFpropTrain");
00884             else
00885                 Profiler::pl_profile_end("ProducScaleAccFpropNoTrain");
00886         }
00887         // compute layer's output non-linearity
00888         if (i+1<n_layers-1)
00889             for (int k=0;k<n_examples;k++)
00890             {
00891                 Vec L=next_layer(k);
00892                 if (self_adjusted_scaling_and_bias)
00893                 {
00894                     real* m=mean_activations[i].data();
00895                     real* v=var_activations[i].data();
00896                     real* a=L.data();
00897                     real* s=activations_scaling[i].data();
00898                     real* b=biases[i].data(); // biases[i] is a 1-column matrix
00899                     int bmod = biases[i].mod();
00900                     for (int j=0;j<layer_sizes[i+1];j++,b+=bmod,m++,v++,a++,s++)
00901                     {
00902                         if (during_training)
00903                         {
00904                             real diff = *a - *m;
00905                             *v = (1-activation_statistics_moving_average_coefficient) * *v
00906                                 + activation_statistics_moving_average_coefficient * diff*diff;
00907                             *m = (1-activation_statistics_moving_average_coefficient) * *m
00908                                 + activation_statistics_moving_average_coefficient * *a;
00909                             *b = target_mean_activation - *m;
00910                             if (*v<100*target_stdev_activation*target_stdev_activation)
00911                                 *s = target_stdev_activation/sqrt(*v);
00912                             else // rescale the weights and the statistics for that neuron
00913                             {
00914                                 real rescale_factor = target_stdev_activation/sqrt(*v);
00915                                 Vec w = weights[i](j);
00916                                 w *= rescale_factor;
00917                                 *b *= rescale_factor;
00918                                 *s = 1;
00919                                 *m *= rescale_factor;
00920                                 *v *= rescale_factor*rescale_factor;
00921                             }
00922                         }
00923                         Profiler::pl_profile_start("activation function");
00924                         *a = tanh((*a + *b) * *s);
00925                         Profiler::pl_profile_end("activation function");
00926                     }
00927                 }
00928                 else{
00929                     Profiler::pl_profile_start("activation function");
00930                     compute_tanh(L,L);
00931                     Profiler::pl_profile_end("activation function");
00932                 }
00933             }
00934         else if (output_type=="NLL")
00935             for (int k=0;k<n_examples;k++)
00936             {
00937                 Vec L=next_layer(k);
00938                 Profiler::pl_profile_start("activation function");
00939                 log_softmax(L,L);
00940                 Profiler::pl_profile_end("activation function");
00941             }
00942         else if (output_type=="cross_entropy")  {
00943             for (int k=0;k<n_examples;k++)
00944             {
00945                 Vec L=next_layer(k);
00946                 Profiler::pl_profile_start("activation function");
00947                 log_sigmoid(L,L);
00948                 Profiler::pl_profile_end("activation function");
00949             }
00950          }
00951     }
00952 }
00953 
00955 void NatGradNNet::fbpropLoss(const Mat& output, const Mat& target, const Vec& example_weight, Mat& costs) const
00956 {
00957     int n_examples = output.length();
00958     Mat out_grad = neuron_gradients_per_layer[n_layers-1];
00959     if (n_examples!=minibatch_size)
00960         out_grad = out_grad.subMatRows(0,n_examples);
00961     if (output_type=="NLL")
00962     {
00963         for (int i=0;i<n_examples;i++)
00964         {
00965             int target_class = int(round(target(i,0)));
00966 #ifdef BOUNDCHECK
00967             if(target_class>=noutputs)
00968                 PLERROR("In NatGradNNet::fbpropLoss one target value %d is higher than allowed by noutputs %d",
00969                         target_class, noutputs);
00970 #endif          
00971             Vec outp = output(i);
00972             Vec grad = out_grad(i);
00973             exp(outp,grad); // map log-prob to prob
00974             costs(i,0) = -outp[target_class];
00975             costs(i,1) = (target_class == argmax(outp))?0:1;
00976             grad[target_class]-=1;
00977 
00978             costs(i,0) *= example_weight[i];
00979             costs(i,2) = costs(i,1) * example_weight[i];
00980             grad *= example_weight[i];
00981         }
00982     }
00983     else if(output_type=="cross_entropy")   {
00984         for (int i=0;i<n_examples;i++)
00985         {
00986             int target_class = int(round(target(i,0)));
00987             Vec outp = output(i);
00988             Vec grad = out_grad(i);
00989             exp(outp,grad); // map log-prob to prob
00990             if( target_class == 1 ) {
00991                 costs(i,0) = - outp[0];
00992                 costs(i,1) = (grad[0]>0.5)?0:1;
00993             }   else    {
00994                 costs(i,0) = - pl_log( 1.0 - grad[0] );
00995                 costs(i,1) = (grad[0]>0.5)?1:0;
00996             }
00997             grad[0] -= (real)target_class;
00998 
00999             costs(i,0) *= example_weight[i];
01000             costs(i,2) = costs(i,1) * example_weight[i];
01001             grad *= example_weight[i];
01002         }
01003 //cout << "costs\t" << costs(0) << endl;
01004 //cout << "gradient\t" << out_grad(0) << endl;
01005 
01006     }
01007     else // if (output_type=="MSE")
01008     {
01009         substract(output,target,out_grad);
01010         for (int i=0;i<n_examples;i++)
01011         {
01012             costs(i,0) = pownorm(out_grad(i));
01013             if (example_weight[i]!=1.0)
01014             {
01015                 out_grad(i) *= example_weight[i];
01016                 costs(i,0) *= example_weight[i];
01017             }
01018         }
01019     }
01020 }
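// Note on the NLL branch above: since fpropNet() stores log-softmax values in
// the output layer, the gradient on the output activations is
// softmax(activation) - onehot(target), which is why exp() maps the
// log-probabilities to probabilities and 1 is then subtracted at the target
// index.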
01021 
01022 void NatGradNNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
01023                                            const Vec& target, Vec& costs) const
01024 {
01025     Vec w(1);
01026     w[0]=1;
01027     Mat outputM = output.toMat(1,output.length());
01028     Mat targetM = target.toMat(1,target.length());
01029     Mat costsM = costs.toMat(1,costs.length());
01030     fbpropLoss(outputM,targetM,w,costsM);
01031 }
01032 /*
01033 void NatGradNNet::computeOutput(const Vec& input, Vec& output)
01034 {
01035     Profiler::pl_profile_start("computeOutput");
01036     neuron_outputs_per_layer[0](0) << input;
01037     fpropNet(1,false);
01038     output << neuron_outputs_per_layer[n_layers-1](0);
01039     Profiler::pl_profile_end("computeOutput");
01040     }
01041 void PLearner::computeOutputAndCosts(const Vec& input, const Vec& target, 
01042                                      Vec& output, Vec& costs) const
01043 {
01044     computeOutput(input, output);
01045     computeCostsFromOutputs(input, output, target, costs);
01046 }
01047 */
01048 
01049 void NatGradNNet::computeOutputs(const Mat& input, Mat& output) const
01050 {
01051     Profiler::pl_profile_start("NatGradNNet::computeOutputs");
01052     PLASSERT(test_minibatch_size<=minibatch_size);
01053     neuron_outputs_per_layer[0].subMat(0,0,input.length(),input.width()) << input;
01054     fpropNet(input.length(),false);
01055     output << neuron_outputs_per_layer[n_layers-1].subMat(0,0,output.length(),output.width());
01056     Profiler::pl_profile_end("NatGradNNet::computeOutputs");
01057 }
01058 void NatGradNNet::computeOutputsAndCosts(const Mat& input, const Mat& target, 
01059                                       Mat& output, Mat& costs) const
01060 {//TODO
01061     Profiler::pl_profile_start("NatGradNNet::computeOutputsAndCosts");
01062 
01063     int n=input.length();
01064     PLASSERT(target.length()==n);
01065     output.resize(n,outputsize());
01066     costs.resize(n,nTestCosts());
01067     computeOutputs(input,output);
01068 
01069     Vec w(n);
01070     w.fill(1);
01071     fbpropLoss(output,target,w,costs);
01072     Profiler::pl_profile_end("NatGradNNet::computeOutputsAndCosts");
01073 }
01074 TVec<string> NatGradNNet::getTestCostNames() const
01075 {
01076     TVec<string> costs;
01077     if (output_type=="NLL")
01078     {
01079         costs.resize(3);
01080         costs[0]="NLL";
01081         costs[1]="class_error";
01082         costs[2]="weighted_class_error";
01083     }
01084     else if (output_type=="cross_entropy")  {
01085         costs.resize(3);
01086         costs[0]="cross_entropy";
01087         costs[1]="class_error";
01088         costs[2]="weighted_class_error";
01089     }
01090     else if (output_type=="MSE")
01091     {
01092         costs.resize(1);
01093         costs[0]="MSE";
01094     }
01095     return costs;
01096 }
01097 
01098 TVec<string> NatGradNNet::getTrainCostNames() const
01099 {
01100     TVec<string> costs = getTestCostNames();
01101     costs.append("train_seconds");
01102     costs.append("cum_train_seconds");
01103     return costs;
01104 }
01105 
01106 
01107 } // end of namespace PLearn
01108 
01109 
01110 /*
01111   Local Variables:
01112   mode:c++
01113   c-basic-offset:4
01114   c-file-style:"stroustrup"
01115   c-file-offsets:((innamespace . 0)(inline-open . 0))
01116   indent-tabs-mode:nil
01117   fill-column:79
01118   End:
01119 */
01120 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :