NatGradSMPNNet.cc
00001 // -*- C++ -*-
00002 
00003 // NatGradSMPNNet.cc
00004 //
00005 // Copyright (C) 2007 Yoshua Bengio
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Yoshua Bengio
00036 
00040 #include "NatGradSMPNNet.h"
00041 #include <plearn/io/openFile.h>
00042 #include <plearn/math/pl_erf.h>
00043 #include <plearn/misc/PTimer.h>
00044 
00045 #include <sys/ipc.h>
00046 #include <sys/sem.h>
00047 #include <sys/shm.h>
00048 
00049 #define PL_LOG_MODULE_NAME "NatGradSMPNNet"
00050 #include <plearn/io/pl_log.h>
00051 
00052 namespace PLearn {
00053 using namespace std;
00054 
00055 union semun {
00056     int val;                    // Value for SETVAL.
00057     struct semid_ds *buf;       // Buffer for IPC_STAT and IPC_SET.
00058     unsigned short int *array;  // Array for GETALL and SETALL.
00059     struct seminfo *__buf;      // Buffer for IPC_INFO (Linux-specific).
00060 };
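// Note: POSIX requires the calling program to define 'union semun' itself;
// it is not provided by <sys/sem.h> on Linux, hence the definition above.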
00061 
00062 PLEARN_IMPLEMENT_OBJECT(
00063     NatGradSMPNNet,
00064     "Multi-layer neural network trained with an efficient Natural Gradient optimization",
00065     "A separate covariance matrix is estimated for the gradients associated with the\n"
00066     "the input weights of each neuron, and a covariance matrix between the gradients\n"
00067     "on the neurons is also computed. These are combined to obtained an adjusted gradient\n"
00068     "on all the parameters. The class GradientCorrector embodies the adjustment algorithm.\n"
00069     "Users may specify different options for the estimator that is used for correcting\n"
00070     "the neurons gradients and for the estimator that is used for correcting the\n"
00071     "parameters gradients (separately for each neuron).\n"
00072     );
00073 
00074 NatGradSMPNNet::NatGradSMPNNet():
00075       delayed_update(true),
00076       wait_for_final_update(true),
00077       synchronize_update(false),
00078       noutputs(-1),
00079       params_averaging_coeff(1.0),
00080       params_averaging_freq(5),
00081       init_lrate(0.01),
00082       lrate_decay(0),
00083       output_layer_L1_penalty_factor(0.0),
00084       output_layer_lrate_scale(1),
00085       minibatch_size(1),
00086       output_type("NLL"),
00087       input_size_lrate_normalization_power(0),
00088       lrate_scale_factor(3),
00089       lrate_scale_factor_max_power(0),
00090       lrate_scale_factor_min_power(0),
00091       self_adjusted_scaling_and_bias(false),
00092       target_mean_activation(-4), // 
00093       target_stdev_activation(3), // 2.5% of the time we are above 1
00094       verbosity(0),
00095       //corr_profiling_start(0), 
00096       //corr_profiling_end(0),
00097       use_pvgrad(false),
00098       pv_initial_stepsize(1e-6),
00099       pv_acceleration(2),
00100       pv_min_samples(2),
00101       pv_required_confidence(0.80),
00102       pv_random_sample_step(false),
00103       pv_gradstats(new VecStatsCollector()),
00104       n_layers(-1),
00105       cumulative_training_time(0),
00106       params_ptr(NULL),
00107       params_id(-1),
00108       params_int_ptr(NULL),
00109       params_int_id(-1),
00110       nsteps(0),
00111       semaphore_id(-1)
00112 {
00113     random_gen = new PRandom();
00114 }
00115 
00117 // declareOptions //
00119 void NatGradSMPNNet::declareOptions(OptionList& ol)
00120 {
00121     declareOption(ol, "delayed_update", &NatGradSMPNNet::delayed_update,
00122                   OptionBase::buildoption,
00123         "If true, then each CPU's update will be delayed until it is its own\n"
00124         "turn to update. This ensures no two CPUs are modifying parameters\n"
00125         "at the same time.");
00126 
00127     declareOption(ol, "wait_for_final_update",
00128                   &NatGradSMPNNet::wait_for_final_update,
00129                   OptionBase::buildoption,
00130         "If true, each CPU will wait its turn before performing its final\n"
00131         "update. It should impact performance only when 'delayed_update' is\n"
00132         "also true.");
00133 
00134     declareOption(ol, "synchronize_update", &NatGradSMPNNet::synchronize_update,
00135                   OptionBase::buildoption,
00136         "If true, then processors will in turn update the shared paremeters\n"
00137         "after each mini-batch and will wait until all processors did their\n"
00138         "update before processing the next mini-batch. Otherwise, no\n"
00139         "synchronization is performed and a processor may process multiple\n"
00140         "mini-batches before doing a parameter update.");
00141 
00142     declareOption(ol, "noutputs", &NatGradSMPNNet::noutputs,
00143                   OptionBase::buildoption,
00144                   "Number of outputs of the neural network, which can be derived from output_type and targetsize_\n");
00145 
00146     declareOption(ol, "n_layers", &NatGradSMPNNet::n_layers,
00147                   OptionBase::learntoption,
00148                   "Number of layers of weights (i.e. 2 for a neural net with one hidden layer).\n"
00149                   "Need not be specified explicitly (derived from hidden_layer_sizes).\n");
00150 
00151     declareOption(ol, "hidden_layer_sizes", &NatGradSMPNNet::hidden_layer_sizes,
00152                   OptionBase::buildoption,
00153                   "Defines the architecture of the multi-layer neural network by\n"
00154                   "specifying the number of hidden units in each hidden layer.\n");
00155 
00156     declareOption(ol, "layer_sizes", &NatGradSMPNNet::layer_sizes,
00157                   OptionBase::learntoption,
00158                   "Derived from hidden_layer_sizes, inputsize_ and noutputs\n");
00159 
00160     declareOption(ol, "cumulative_training_time", &NatGradSMPNNet::cumulative_training_time,
00161                   OptionBase::learntoption,
00162                   "Cumulative training time since age=0, in seconds.\n");
00163 
00164     declareOption(ol, "layer_params", &NatGradSMPNNet::layer_params,
00165                   OptionBase::learntoption,
00166                   "Parameters used while training, for each layer, organized as follows: layer_params[i] \n"
00167                   "is a matrix of dimension layer_sizes[i+1] x (layer_sizes[i]+1)\n"
00168                   "containing the neuron biases in its first column.\n");
00169 
00170     declareOption(ol, "activations_scaling", &NatGradSMPNNet::activations_scaling,
00171                   OptionBase::learntoption,
00172                   "Scaling coefficients for each neuron of each layer, if self_adjusted_scaling_and_bias:\n"
00173                   " output = tanh(activations_scaling[layer][neuron] * (biases[layer][neuron] + weights[layer]*input[layer-1]))\n");
00174 
00175     declareOption(ol, "layer_mparams", &NatGradSMPNNet::layer_mparams,
00176                   OptionBase::learntoption,
00177                   "Test parameters for each layer, organized like layer_params.\n"
00178                   "This is a moving average of layer_params, computed with\n"
00179                   "coefficient params_averaging_coeff. Thus the mparams are\n"
00180                   "a smoothed version of the params, and they are used only\n"
00181                   "during testing.\n");
00182 
00183     declareOption(ol, "params_averaging_coeff", &NatGradSMPNNet::params_averaging_coeff,
00184                   OptionBase::buildoption,
00185                   "Coefficient used to control how fast we forget old parameters\n"
00186                   "in the moving average performed as follows:\n"
00187                   "mparams <-- (1-params_averaging_coeff)*mparams + params_averaging_coeff*params\n");
00188 
00189     declareOption(ol, "params_averaging_freq", &NatGradSMPNNet::params_averaging_freq,
00190                   OptionBase::buildoption,
00191                   "How often (in number of minibatches, i.e. weight updates) we perform\n"
00192                   "the moving average update\n"
00193                   "mparams <-- (1-params_averaging_coeff)*mparams + params_averaging_coeff*params\n");
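    // Example for params_averaging_coeff / params_averaging_freq: with
    // coeff = 0.01 and freq = 5, mparams is refreshed every 5 minibatches and
    // tracks an exponential moving average of params whose effective horizon
    // is roughly 1/params_averaging_coeff = 100 refreshes.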
00194 
00195     declareOption(ol, "init_lrate", &NatGradSMPNNet::init_lrate,
00196                   OptionBase::buildoption,
00197                   "Initial learning rate\n");
00198 
00199     declareOption(ol, "lrate_decay", &NatGradSMPNNet::lrate_decay,
00200                   OptionBase::buildoption,
00201                   "Learning rate decay factor\n");
00202 
00203     declareOption(ol, "output_layer_L1_penalty_factor",
00204                   &NatGradSMPNNet::output_layer_L1_penalty_factor,
00205                   OptionBase::buildoption,
00206                   "Optional (default=0) factor of L1 regularization term, i.e.\n"
00207                   "minimize L1_penalty_factor * sum_{ij} |weights(i,j)| during training.\n"
00208                   "Gets multiplied by the learning rate. Only applies to the output layer.\n");
00209 
00210     declareOption(ol, "output_layer_lrate_scale", &NatGradSMPNNet::output_layer_lrate_scale,
00211                   OptionBase::buildoption,
00212                   "Scaling factor of the learning rate for the output layer. Values less than 1\n"
00213                   "mean that the output layer parameters have a smaller learning rate than the others.\n");
00214 
00215     declareOption(ol, "minibatch_size", &NatGradSMPNNet::minibatch_size,
00216                   OptionBase::buildoption,
00217                   "Update the parameters only so often (number of examples).\n");
00218 
00219     declareOption(ol, "neurons_natgrad_template", &NatGradSMPNNet::neurons_natgrad_template,
00220                   OptionBase::buildoption,
00221                   "Optional template GradientCorrector for the neurons' gradients.\n"
00222                   "If not provided, then the natural gradient correction\n"
00223                   "on the neurons' gradients is not performed.\n");
00224 
00225     declareOption(ol, "neurons_natgrad_per_layer", 
00226                   &NatGradSMPNNet::neurons_natgrad_per_layer,
00227                   OptionBase::learntoption,
00228                   "Vector of GradientCorrector objects for the gradient on the neurons of each layer.\n"
00229                   "They are copies of the neurons_natgrad_template provided by the user.\n");
00230 
00231     declareOption(ol, "params_natgrad_template", 
00232                   &NatGradSMPNNet::params_natgrad_template,
00233                   OptionBase::buildoption,
00234                   "Optional template GradientCorrector object for the gradient of the parameters inside each neuron.\n"
00235                   "It is replicated in the params_natgrad_per_group vector, for each neuron.\n"
00236                   "If not provided, then the neuron-specific natural gradient estimator is not used.\n");
00237 
00238     declareOption(ol, "params_natgrad_per_input_template",
00239                   &NatGradSMPNNet::params_natgrad_per_input_template,
00240                   OptionBase::buildoption,
00241                   "Optional template GradientCorrector object for the gradient of the parameters of the first layer\n"
00242                   "grouped based upon their input. It is replicated in the params_natgrad_per_group vector, for each group.\n"
00243                   "If provided, overrides the params_natgrad_template for the parameters of the first layer.\n");
00244 
00245     declareOption(ol, "params_natgrad_per_group", 
00246                     &NatGradSMPNNet::params_natgrad_per_group,
00247                     OptionBase::learntoption,
00248                     "Vector of GradientCorrector objects for the gradient inside groups of parameters.\n"
00249                     "They are copies of the params_natgrad_template and params_natgrad_per_input_template\n"
00250                     "templates provided by the user.\n");
00251 
00252     declareOption(ol, "full_natgrad", &NatGradSMPNNet::full_natgrad,
00253                   OptionBase::buildoption,
00254                   "GradientCorrector for all the parameter gradients simultaneously.\n"
00255                   "This should not be set if neurons_natgrad or params_natgrad_template\n"
00256                   "is provided. If none of the GradientCorrectors is provided, then\n"
00257                   "regular stochastic gradient is performed.\n");
00258 
00259     declareOption(ol, "output_type", 
00260                   &NatGradSMPNNet::output_type,
00261                   OptionBase::buildoption,
00262                   "Type of output cost: 'cross_entropy' for binary classification,\n"
00263                   "'NLL' for classification problems, or 'MSE' for regression.\n");
00264 
00265     declareOption(ol, "input_size_lrate_normalization_power", 
00266                   &NatGradSMPNNet::input_size_lrate_normalization_power, 
00267                   OptionBase::buildoption,
00268                   "Scale the learning rate neuron-wise (or layer-wise actually, here):\n"
00269                   "-1 scales by 1 / ||x||^2 and -2 by 1 / ||x||, where x is the 1-extended input vector of the neuron\n"
00270                   "0 does not scale the learning rate\n"
00271                   "1 scales it by 1 / the number of inputs of the neuron\n"
00272                   "2 scales it by 1 / sqrt(the number of inputs of the neuron), etc.\n");
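    // Example for input_size_lrate_normalization_power: with a value of 1 and
    // a neuron fed by 100 inputs, its learning rate is divided by 100; with a
    // value of 2 it is divided by sqrt(100) = 10.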
00273 
00274     declareOption(ol, "lrate_scale_factor",
00275                   &NatGradSMPNNet::lrate_scale_factor,
00276                   OptionBase::buildoption,
00277                   "Scale the learning rate in different neurons by a factor\n"
00278                   "taken randomly as follows: choose integer n uniformly between\n"
00279                   "lrate_scale_factor_min_power and lrate_scale_factor_max_power\n"
00280                   "inclusive, and then scale the learning rate by lrate_scale_factor^n.\n");
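    // Example for lrate_scale_factor: with a factor of 3, min_power = -1 and
    // max_power = 1, each neuron's learning rate is multiplied by 3^n with n
    // drawn uniformly from {-1, 0, 1}, i.e. by 1/3, 1 or 3.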
00281 
00282     declareOption(ol, "lrate_scale_factor_max_power",
00283                   &NatGradSMPNNet::lrate_scale_factor_max_power,
00284                   OptionBase::buildoption,
00285                   "See help on lrate_scale_factor\n");
00286 
00287     declareOption(ol, "lrate_scale_factor_min_power",
00288                   &NatGradSMPNNet::lrate_scale_factor_min_power,
00289                   OptionBase::buildoption,
00290                   "See help on lrate_scale_factor\n");
00291 
00292     declareOption(ol, "self_adjusted_scaling_and_bias",
00293                   &NatGradSMPNNet::self_adjusted_scaling_and_bias,
00294                   OptionBase::buildoption,
00295                   "If true, let each neuron self-adjust its bias and scaling factor\n"
00296                   "of its activations so that the mean and standard deviation of the\n"
00297                   "activations reach the target_mean_activation and target_stdev_activation.\n"
00298                   "The activations mean and variance are estimated by a moving average with\n"
00299                   "coefficient given by activation_statistics_moving_average_coefficient.\n");
00300 
00301     declareOption(ol, "target_mean_activation",
00302                   &NatGradSMPNNet::target_mean_activation,
00303                   OptionBase::buildoption,
00304                   "See help on self_adjusted_scaling_and_bias\n");
00305 
00306     declareOption(ol, "target_stdev_activation",
00307                   &NatGradSMPNNet::target_stdev_activation,
00308                   OptionBase::buildoption,
00309                   "See help on self_adjusted_scaling_and_bias\n");
00310 
00311     declareOption(ol, "activation_statistics_moving_average_coefficient",
00312                   &NatGradSMPNNet::activation_statistics_moving_average_coefficient,
00313                   OptionBase::buildoption,
00314                   "The activations mean and variance used for self_adjusted_scaling_and_bias\n"
00315                   "are estimated by a moving average with this coefficient:\n"
00316                   "   xbar <-- coefficient * xbar + (1-coefficient) * x\n"
00317                   "where x could be the activation or its square\n");
00318 
00319     //declareOption(ol, "corr_profiling_start",
00320     //              &NatGradSMPNNet::corr_profiling_start,
00321     //              OptionBase::buildoption,
00322     //              "Stage to start the profiling of the gradients' and the\n"
00323     //              "natural gradients' correlation.\n");
00324 
00325     //declareOption(ol, "corr_profiling_end",
00326     //              &NatGradSMPNNet::corr_profiling_end,
00327     //              OptionBase::buildoption,
00328     //              "Stage to end the profiling of the gradients' and the\n"
00329     //              "natural gradients' correlations.\n");
00330 
00331     declareOption(ol, "use_pvgrad",
00332                   &NatGradSMPNNet::use_pvgrad,
00333                   OptionBase::buildoption,
00334                   "Use Pascal Vincent's gradient technique.\n"
00335                   "All options specific to this technique start with pv_...\n"
00336                   "This is currently very experimental. The current code is\n"
00337                   "NOT YET optimised for speed (nor does it support minibatches).");
00338 
00339     declareOption(ol, "pv_initial_stepsize",
00340                   &NatGradSMPNNet::pv_initial_stepsize,
00341                   OptionBase::buildoption,
00342                   "Initial size of steps in parameter space");
00343 
00344     declareOption(ol, "pv_acceleration",
00345                   &NatGradSMPNNet::pv_acceleration,
00346                   OptionBase::buildoption,
00347                   "Coefficient by which to multiply/divide the step sizes");
00348 
00349     declareOption(ol, "pv_min_samples",
00350                   &NatGradSMPNNet::pv_min_samples,
00351                   OptionBase::buildoption,
00352                   "PV's minimum number of samples to estimate gradient sign.\n"
00353                   "This should be at least 2.");
00354 
00355     declareOption(ol, "pv_required_confidence",
00356                   &NatGradSMPNNet::pv_required_confidence,
00357                   OptionBase::buildoption,
00358                   "Minimum required confidence (probability of being positive or negative) for taking a step.");
00359 
00360     declareOption(ol, "pv_random_sample_step",
00361                   &NatGradSMPNNet::pv_random_sample_step,
00362                   OptionBase::buildoption,
00363                   "If this is set to true, then we will randomly choose the step sign\n"
00364                   "for each parameter based on the estimated probability of it being\n"
00365                   "positive or negative.");
00366 
00367     // Now call the parent class' declareOptions
00368     inherited::declareOptions(ol);
00369 }
00370 
00372 // declareMethods //
00374 void NatGradSMPNNet::declareMethods(RemoteMethodMap& rmm)
00375 {
00376     declareMethod(rmm, "freeSharedMemory", &NatGradSMPNNet::freeSharedMemory,
00377         (BodyDoc("Free shared memory resources.")));
00378 
00379     inherited::declareMethods(rmm);
00380 }
00381 
00383 // build_ //
00385 void NatGradSMPNNet::build_()
00386 {
00387     if (!train_set)
00388         return;
00389     inputsize_ = train_set->inputsize();
00390     if (output_type=="MSE")
00391     {
00392         if (noutputs<0) noutputs = targetsize_;
00393         else PLASSERT_MSG(noutputs==targetsize_,"NatGradSMPNNet: noutputs should be -1 or match data's targetsize");
00394     }
00395     else if (output_type=="NLL")
00396     {
00397         if (noutputs<0)
00398             PLERROR("NatGradSMPNNet: if output_type=NLL (classification), one \n"
00399                     "should provide noutputs = number of classes, or possibly\n"
00400                     "1 when there are 2 classes.\n");
00401     }
00402     else if (output_type=="cross_entropy")
00403     {
00404         if(noutputs!=1)
00405             PLERROR("NatGradSMPNNet: if output_type=cross_entropy, then \n"
00406                     "noutputs should be 1.\n");
00407     }
00408     else PLERROR("NatGradSMPNNet: output_type should be cross_entropy, NLL or MSE\n");
00409 
00410     if( output_layer_L1_penalty_factor < 0. )
00411         PLWARNING("NatGradSMPNNet::build_ - output_layer_L1_penalty_factor is negative!\n");
00412 
00413     if(use_pvgrad && minibatch_size!=1)
00414         PLERROR("PV's gradient technique (triggered by use_pvgrad): support for minibatch not yet implemented (must have minibatch_size=1)");
00415     
00416     while (hidden_layer_sizes.length()>0 && hidden_layer_sizes[hidden_layer_sizes.length()-1]==0)
00417         hidden_layer_sizes.resize(hidden_layer_sizes.length()-1);
00418     n_layers = hidden_layer_sizes.length()+2;
00419     layer_sizes.resize(n_layers);
00420     layer_sizes.subVec(1,n_layers-2) << hidden_layer_sizes;
00421     layer_sizes[0]=inputsize_;
00422     layer_sizes[n_layers-1]=noutputs;
00423     if (!layer_params.isEmpty())
00424         PLERROR("In NatGradSMPNNet::build_ - Currently, one can only build "
00425                 "a network from scratch");
00426     layer_params.resize(n_layers-1);
00427     layer_mparams.resize(n_layers-1);
00428     layer_params_delta.resize(n_layers-1);
00429     layer_params_gradient.resize(n_layers-1);
00430     layer_params_update.resize(n_layers - 1);
00431     biases.resize(n_layers-1);
00432     activations_scaling.resize(n_layers-1);
00433     weights.resize(n_layers-1);
00434     mweights.resize(n_layers-1);
00435     mean_activations.resize(n_layers-1);
00436     var_activations.resize(n_layers-1);
00437     int n_neurons=0;
00438     int n_params=0;
00439     for (int i=0;i<n_layers-1;i++)
00440     {
00441         n_neurons+=layer_sizes[i+1];
00442         n_params+=layer_sizes[i+1]*(1+layer_sizes[i]);
00443     }
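    // Example: with layer_sizes = [10, 50, 3] (10 inputs, 50 hidden units,
    // 3 outputs), the loop above gives n_neurons = 50 + 3 = 53 and
    // n_params = 50*(10+1) + 3*(50+1) = 703 (weights plus biases).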
00444 
00445     // Allocate shared memory for parameters.
00446     freeSharedMemory(); // First deallocate memory if needed.
00447     long total_memory_needed = long(n_params) * sizeof(real);
00448     params_id = shmget(IPC_PRIVATE, total_memory_needed, 0666 | IPC_CREAT);
00449     DBG_MODULE_LOG << "params_id = " << params_id << endl;
00450     if (params_id == -1) {
00451         PLERROR("In NatGradSMPNNet::build_ - Error while allocating shared "
00452                 "memory (errno = %d)", errno);
00453     }
00454     params_ptr = (real*) shmat(params_id, 0, 0);
00455     PLCHECK( params_ptr );
00456     long total_int_memory_needed = 1 * sizeof(int);
00457     params_int_id = shmget(IPC_PRIVATE, total_int_memory_needed, 0666 | IPC_CREAT);
00458     DBG_MODULE_LOG << "params_int_id = " << params_int_id << endl;
00459     PLCHECK( params_int_id != -1 );
00460     params_int_ptr = (int*) shmat(params_int_id, 0, 0);
00461     PLCHECK( params_int_ptr );
00462     // We should have copied data from 'all_params' first if there were some!
00463     PLCHECK_MSG( all_params.isEmpty(), "Multiple builds not implemented yet" );
00464     all_params = Vec(n_params, params_ptr);
00465 
00466     all_params.resize(n_params);
00467     all_mparams.resize(n_params);
00468     all_params_gradient.resize(n_params);
00469     all_params_delta.resize(n_params);
00470     params_update.resize(n_params);
00471     params_update.fill(0);
00472 
00473     // depending on how parameters are grouped on the first layer
00474     int n_groups = params_natgrad_per_input_template ? (n_neurons-layer_sizes[1]+layer_sizes[0]+1) : n_neurons;
00475     group_params.resize(n_groups);
00476     group_params_delta.resize(n_groups);
00477     group_params_gradient.resize(n_groups);
00478 
00479     for (int i=0,k=0,p=0;i<n_layers-1;i++)
00480     {
00481         int np=layer_sizes[i+1]*(1+layer_sizes[i]);
00482         // First layer has natural gradient applied on groups of parameters
00483         // linked to the same input -> parameters must be stored TRANSPOSED!
00484         if( i==0 && params_natgrad_per_input_template ) {
00485             PLERROR("This should not be executed");
00486             layer_params[i]=all_params.subVec(p,np).toMat(layer_sizes[i]+1,layer_sizes[i+1]);
00487             layer_params_update[i] = params_update.subVec(p,np).toMat(
00488                     layer_sizes[i] + 1, layer_sizes[i+1]);
00489             layer_mparams[i]=all_mparams.subVec(p,np).toMat(layer_sizes[i]+1,layer_sizes[i+1]);
00490             biases[i]=layer_params[i].subMatRows(0,1);
00491             weights[i]=layer_params[i].subMatRows(1,layer_sizes[i]); //weights[0] from layer 0 to layer 1
00492             mweights[i]=layer_mparams[i].subMatRows(1,layer_sizes[i]); //weights[0] from layer 0 to layer 1
00493             layer_params_gradient[i]=all_params_gradient.subVec(p,np).toMat(layer_sizes[i]+1,layer_sizes[i+1]);
00494             layer_params_delta[i]=all_params_delta.subVec(p,np);
00495             for (int j=0;j<layer_sizes[i]+1;j++,k++)   // include a bias input 
00496             {
00497                 group_params[k]=all_params.subVec(p,layer_sizes[i+1]);
00498                 group_params_delta[k]=all_params_delta.subVec(p,layer_sizes[i+1]);
00499                 group_params_gradient[k]=all_params_gradient.subVec(p,layer_sizes[i+1]);
00500                 p+=layer_sizes[i+1];
00501             }
00502         // Usual parameter storage
00503         } else {
00504             layer_params[i]=all_params.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00505             layer_params_update[i] = params_update.subVec(p, np).toMat(
00506                     layer_sizes[i+1], layer_sizes[i] + 1);
00507             layer_mparams[i]=all_mparams.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00508             biases[i]=layer_params[i].subMatColumns(0,1);
00509             weights[i]=layer_params[i].subMatColumns(1,layer_sizes[i]); // weights[0] from layer 0 to layer 1
00510             mweights[i]=layer_mparams[i].subMatColumns(1,layer_sizes[i]); // weights[0] from layer 0 to layer 1
00511             layer_params_gradient[i]=all_params_gradient.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00512             layer_params_delta[i]=all_params_delta.subVec(p,np);
00513             for (int j=0;j<layer_sizes[i+1];j++,k++)
00514             {
00515                 group_params[k]=all_params.subVec(p,1+layer_sizes[i]);
00516                 group_params_delta[k]=all_params_delta.subVec(p,1+layer_sizes[i]);
00517                 group_params_gradient[k]=all_params_gradient.subVec(p,1+layer_sizes[i]);
00518                 p+=1+layer_sizes[i];
00519             }
00520         }
00521         activations_scaling[i].resize(layer_sizes[i+1]);
00522         mean_activations[i].resize(layer_sizes[i+1]);
00523         var_activations[i].resize(layer_sizes[i+1]);
00524     }
00525     if (params_natgrad_template || params_natgrad_per_input_template)
00526     {
00527         int n_input_groups=0;
00528         int n_neuron_groups=0;
00529         if(params_natgrad_template)
00530             n_neuron_groups = n_neurons;
00531         if( params_natgrad_per_input_template ) {
00532             n_input_groups = layer_sizes[0]+1;
00533             if(params_natgrad_template) // override first layer groups if present
00534                 n_neuron_groups -= layer_sizes[1];
00535         }
00536         params_natgrad_per_group.resize(n_input_groups+n_neuron_groups);
00537         for (int i=0;i<n_input_groups;i++)
00538             params_natgrad_per_group[i] = PLearn::deepCopy(params_natgrad_per_input_template);
00539         for (int i=n_input_groups; i<n_input_groups+n_neuron_groups;i++)
00540             params_natgrad_per_group[i] = PLearn::deepCopy(params_natgrad_template);
00541     }
00542     if (neurons_natgrad_template && neurons_natgrad_per_layer.length()==0)
00543     {
00544         neurons_natgrad_per_layer.resize(n_layers); // 0 not used
00545         for (int i=1;i<n_layers;i++) // no need for correcting input layer
00546             neurons_natgrad_per_layer[i] = PLearn::deepCopy(neurons_natgrad_template);
00547     }
00548     neuron_gradients.resize(minibatch_size,n_neurons);
00549     neuron_outputs_per_layer.resize(n_layers); // layer 0 = input, layer n_layers-1 = output
00550     neuron_extended_outputs_per_layer.resize(n_layers); // layer 0 = input, layer n_layers-1 = output
00551     neuron_gradients_per_layer.resize(n_layers); // layer 0 not used
00552     neuron_extended_outputs_per_layer[0].resize(minibatch_size,1+layer_sizes[0]);
00553     neuron_outputs_per_layer[0]=neuron_extended_outputs_per_layer[0].subMatColumns(1,layer_sizes[0]);
00554     neuron_extended_outputs_per_layer[0].column(0).fill(1.0); // for biases
00555     for (int i=1,k=0;i<n_layers;k+=layer_sizes[i],i++)
00556     {
00557         neuron_extended_outputs_per_layer[i].resize(minibatch_size,1+layer_sizes[i]);
00558         neuron_outputs_per_layer[i]=neuron_extended_outputs_per_layer[i].subMatColumns(1,layer_sizes[i]);
00559         neuron_extended_outputs_per_layer[i].column(0).fill(1.0); // for biases
00560         neuron_gradients_per_layer[i] = 
00561             neuron_gradients.subMatColumns(k,layer_sizes[i]);
00562     }
00563     example_weights.resize(minibatch_size);
00564     train_costs.resize(minibatch_size, nTestCosts());
00565 
00566     Profiler::activate();
00567 
00568     // Gradient correlation profiling
00569     //if( corr_profiling_start != corr_profiling_end )  {
00570     //    PLASSERT( (0<=corr_profiling_start) && (corr_profiling_start<corr_profiling_end) );
00571     //    cout << "n_params " << n_params << endl;
00572     //    // Build the names.
00573     //    stringstream ss_suffix;
00574     //    for (int i=0;i<n_layers;i++)    {
00575     //        ss_suffix << "_" << layer_sizes[i];
00576     //    }
00577     //    ss_suffix << "_stages_" << corr_profiling_start << "_" << corr_profiling_end;
00578     //    string str_gc_name = "gCcorr" + ss_suffix.str();
00579     //    string str_ngc_name;
00580     //    if( full_natgrad )  {
00581     //        str_ngc_name = "ngCcorr_full" + ss_suffix.str();
00582     //    }   else if (params_natgrad_template)   {
00583     //        str_ngc_name = "ngCcorr_params" + ss_suffix.str();
00584     //    }
00585     //    // Build the profilers.
00586     //    g_corrprof = new CorrelationProfiler( n_params, str_gc_name);
00587     //    g_corrprof->build();
00588     //    ng_corrprof = new CorrelationProfiler( n_params, str_ngc_name);
00589     //    ng_corrprof->build();
00590     //}
00591 
00592     if (synchronize_update && !delayed_update)
00593         PLERROR("NatGradSMPNNet::build_ - 'synchronize_update' cannot be used "
00594                 "when 'delayed_update' is false");
00595 }
00596 
00598 // build //
00600 void NatGradSMPNNet::build()
00601 {
00602     inherited::build();
00603     build_();
00604 }
00605 
00607 // freeSharedMemory //
00609 void NatGradSMPNNet::freeSharedMemory()
00610 {
00611     DBG_MODULE_LOG << "Freeing shared memory" << endl;
00612     if (params_ptr) {
00613         shmctl(params_id, IPC_RMID, 0);
00614         params_ptr = NULL;
00615         params_id = -1;
00616     }
00617     if (params_int_ptr) {
00618         shmctl(params_int_id, IPC_RMID, 0);
00619         params_int_ptr = NULL;
00620         params_int_id = -1;
00621     }
00622 }
00623 
00624 
00626 // makeDeepCopyFromShallowCopy //
00628 void NatGradSMPNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00629 {
00630     inherited::makeDeepCopyFromShallowCopy(copies);
00631 
00632     deepCopyField(hidden_layer_sizes, copies);
00633     deepCopyField(layer_params, copies);
00634     deepCopyField(layer_mparams, copies);
00635     deepCopyField(biases, copies);
00636     deepCopyField(weights, copies);
00637     deepCopyField(mweights, copies);
00638     deepCopyField(activations_scaling, copies);
00639     deepCopyField(neurons_natgrad_template, copies);
00640     deepCopyField(neurons_natgrad_per_layer, copies);
00641     deepCopyField(params_natgrad_template, copies);
00642     deepCopyField(params_natgrad_per_input_template, copies);
00643     deepCopyField(params_natgrad_per_group, copies);
00644     deepCopyField(full_natgrad, copies);
00645     deepCopyField(layer_sizes, copies);
00646     deepCopyField(targets, copies);
00647     deepCopyField(example_weights, copies);
00648     deepCopyField(train_costs, copies);
00649     deepCopyField(neuron_outputs_per_layer, copies);
00650     deepCopyField(neuron_extended_outputs_per_layer, copies);
00651     deepCopyField(all_params, copies);
00652     deepCopyField(all_mparams, copies);
00653     deepCopyField(all_params_gradient, copies);
00654     deepCopyField(layer_params_gradient, copies);
00655     deepCopyField(neuron_gradients, copies);
00656     deepCopyField(neuron_gradients_per_layer, copies);
00657     deepCopyField(all_params_delta, copies);
00658     deepCopyField(group_params, copies);
00659     deepCopyField(group_params_gradient, copies);
00660     deepCopyField(group_params_delta, copies);
00661     deepCopyField(layer_params_delta, copies);
00662 
00663     deepCopyField(pv_gradstats, copies);
00664     deepCopyField(pv_stepsizes, copies);
00665     deepCopyField(pv_stepsigns, copies);
00666 
00667     PLCHECK_MSG(false, "Not fully implemented");
00668 
00669     if (params_ptr)
00670         PLERROR("In NatGradSMPNNet::makeDeepCopyFromShallowCopy - Deep copy of"
00671                 " 'params_ptr' not implemented");
00672     if (params_int_ptr)
00673         PLERROR("In NatGradSMPNNet::makeDeepCopyFromShallowCopy - Deep copy of"
00674                 " 'params_int_ptr' not implemented");
00675 
00676 
00677 
00678 /*
00679     deepCopyField(, copies);
00680 */
00681 }
00682 
00683 
00684 int NatGradSMPNNet::outputsize() const
00685 {
00686     return noutputs;
00687 }
00688 
00689 void NatGradSMPNNet::forget()
00690 {
00694     inherited::forget();
00695     for (int i=0;i<n_layers-1;i++)
00696     {
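        // Fan-in scaled initialization: weights feeding layer i+1 are drawn
        // uniformly in [-1/sqrt(layer_sizes[i]), 1/sqrt(layer_sizes[i])],
        // biases start at zero and activation scalings at one.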
00697         real delta = 1/sqrt(real(layer_sizes[i]));
00698         random_gen->fill_random_uniform(weights[i],-delta,delta);
00699         biases[i].clear();
00700         activations_scaling[i].fill(1.0);
00701         mean_activations[i].clear();
00702         var_activations[i].fill(1.0);
00703     }
00704     stage = 0;
00705     cumulative_training_time=0;
00706     if (params_averaging_coeff!=1.0)
00707         all_mparams << all_params;
00708     
00709     if(use_pvgrad)
00710     {
00711         pv_gradstats->forget();
00712         int n = all_params.length();
00713         pv_stepsizes.resize(n);
00714         pv_stepsizes.fill(pv_initial_stepsize);
00715         pv_stepsigns.resize(n);
00716         pv_stepsigns.fill(true);
00717     }
00718 
00719     nsteps = 0;
00720     params_update.fill(0);
00721 }
00722 
00724 // train //
00726 void NatGradSMPNNet::train()
00727 {
00728     static int log_idx = -1;
00729     log_idx = (log_idx + 1) % 50;
00730 
00731     /*
00732     PStream tmp_log = openFile("/u/delallea/tmp/tmp_log" + tostring(log_idx),
00733                                PStream::raw_ascii, "w");
00734 
00735     tmp_log << "Starting train " << endl;
00736     tmp_log.flush();
00737     */
00738 
00739     if (inputsize_<0) {
00740         /*
00741         tmp_log << "Calling build" << endl;
00742         tmp_log.flush();
00743         */
00744         build();
00745     }
00746 
00747     targets.resize(minibatch_size,targetsize());  // the train_set's targetsize()
00748 
00749     if(!train_set)
00750         PLERROR("In NatGradSMPNNet::train - You did not call setTrainingSet");
00751     
00752     if(!train_stats)
00753         setTrainStatsCollector(new VecStatsCollector());
00754 
00755     train_costs.fill(MISSING_VALUE) ;
00756 
00757     train_stats->forget();
00758 
00759     PP<ProgressBar> pb;
00760 
00761     //tmp_log << "Beginning stuff done" << endl;
00762     //tmp_log.flush();
00763 
00764     Profiler::reset("training");
00765     Profiler::start("training");
00766     //Profiler::pl_profile_start("Totaltraining");
00767     if( report_progress && stage < nstages )
00768         pb = new ProgressBar( "Training "+classname(),
00769                               nstages - stage );
00770 
00771     Vec costs_plus_time(nTrainCosts(), MISSING_VALUE);
00772     Vec costs = costs_plus_time.subVec(0, train_costs.width());
00773     int nsamples = train_set->length();
00774 
00775     // Obtain the number of CPUs we want to use.
00776     char* ncpus_ptr = getenv("NCPUS");
00777     if (!ncpus_ptr)
00778         PLERROR("In NatGradSMPNNet::train - The environment variable 'NCPUS' "
00779                 "must be set (to the number of CPUs being used)");
00780     int ncpus = atoi(ncpus_ptr);
00781 
00782     // Semaphore to know which cpu should be updating weights next.
00783     if (semaphore_id >= 0) {
00784         // First get rid of existing semaphore.
00785         int success = semctl(semaphore_id, 0, IPC_RMID);
00786         if (success < 0)
00787             PLERROR("In NatGradSMPNNet::train - Could not remove previous "
00788                     "semaphore (errno = %d)", errno);
00789         semaphore_id = -1;
00790     }
00791     // The semaphore has 'ncpus' + 2 values.
00792     // The first one is the index of the CPU that will be next to update
00793     // weights.
00794     // The other ones are 0/1 values that are initialized with 0, and take 1
00795     // once the corresponding CPU has finished all updates for this training
00796     // period.
00797     // Finally, the last value is 0 when 'synchronize_update' is false, and
00798     // otherwise is:
00799     // - in a first step, the number of CPUs that have finished performing
00800     // their mini-batch computation,
00801     // - in a second step, the number of CPUs that have finished updating the
00802     // shared parameters.
00803     semaphore_id = semget(IPC_PRIVATE, ncpus + 2, 0666 | IPC_CREAT);
00804     if (semaphore_id == -1)
00805         PLERROR("In NatGradSMPNNet::train - Could not create semaphore "
00806                 "(errno = %d)", errno);
00807     // Initialize all values in the semaphore to zero.
00808     semun semun_v;
00809     semun_v.val = 0;
00810     for (int i = 0; i < ncpus + 2; i++) {
00811         int success = semctl(semaphore_id, i, SETVAL, semun_v);
00812         if (success != 0)
00813             PLERROR("In NatGradSMPNNet::train - Could not initialize semaphore"
00814                     " value (errno = %d)", errno);
00815     }
00816 
00817     // Initialize current stage, stored in integer shared memory.
00818     int stage_idx = 0;
00819     params_int_ptr[stage_idx] = stage;
00820 
00821     //tmp_log << "Ready to fork" << endl;
00822     //tmp_log.flush();
00823 
00824     // No need to call wait() to acknowledge the death of a child process in
00825     // order to avoid defunct processes.
00826     signal(SIGCLD, SIG_IGN);
00827 
00828     // Fork one process/cpu.
00829     int iam = 0;
00830     for (int cpu = 1; cpu < ncpus ; cpu++)
00831         if (fork() == 0) {
00832             iam = cpu;
00833             break;
00834         }
00835 
00836     if (!iam) {
00837         //tmp_log << "Forked" << endl;
00838         //tmp_log.flush();
00839     }
00840 
00841     // Each processor computes gradient over its own subset of samples (between
00842     // indices 'start' and 'start + my_n_samples' in the training set).
00843     int n_left = nsamples % ncpus;
00844     int n_per_cpu = nsamples / ncpus;
00845     int start, my_n_samples;
00846     if (iam < n_left) {
00847         // This CPU is given one extra training sample to compensate for the
00848         // fact that the number of samples is not an exact multiple of the
00849         // number of CPUs.
00850         start = (n_per_cpu + 1) * iam;
00851         my_n_samples = n_per_cpu + 1;
00852     } else {
00853         start = (n_per_cpu + 1) * n_left + n_per_cpu * (iam - n_left);
00854         my_n_samples = n_per_cpu;
00855     }
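    // Example of the partition above: with nsamples = 10 and ncpus = 3,
    // n_per_cpu = 3 and n_left = 1, so CPU 0 processes samples 0-3,
    // CPU 1 samples 4-6 and CPU 2 samples 7-9.
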
00856     if (iam == 0)
00857         PLASSERT_MSG( start == 0, "First CPU must start at first sample" );
00858     if (iam == ncpus - 1)
00859         PLASSERT_MSG( start + my_n_samples == nsamples,
00860                       "Last CPU must start at last sample" );
00861 
00862     // The total number of examples that must be seen is given by 'stage_incr',
00863     // computed as 'nstages - stage'. Each CPU is responsible for going through
00864     // a fraction of 'stage_incr', denoted by 'my_stage_incr'.
00865     int stage_incr = nstages - stage;
00866     int stage_incr_per_cpu = stage_incr / ncpus;
00867     int stage_incr_left = stage_incr % ncpus;
00868     int my_stage_incr = iam >= stage_incr_left ? stage_incr_per_cpu
00869                                                : stage_incr_per_cpu + 1;
00870 
00871     PP<PTimer> ptimer;
00872     // Number of mini-batches that have been processed before one update.
00873     int n_minibatches_per_update = 0;
00874     StatsCollector nmbpu_stats; // Use -1 in constructor if you want the median.
00875 
00876     if (iam == 0) {
00877         //tmp_log << "Starting loop" << endl;
00878         //tmp_log.flush();
00879         ptimer = new PTimer();
00880         Profiler::reset("big_loop");
00881         Profiler::start("big_loop");
00882         ptimer->startTimer("big_loop");
00883     }
00884 
00885     // TODO Maybe...
00886     // - see if it has anything to do with accessing shared memory
00887     // - try to mix in data with a lower or higher measure_every, just to see
00888     // if the difference in behaviors in speedup_whilefalse is due to having
00889     // less examples to process.
00890 
00891     //pout << "CPU " << iam << ": my_stage_incr = " << my_stage_incr << endl;
00892     for(int i = 0; i < my_stage_incr; i++)
00893     {
00894         int sample = start + i % my_n_samples;
00895         int b = i % minibatch_size;
00896         Vec input = neuron_outputs_per_layer[0](b);
00897         Vec target = targets(b);
00898         //Profiler::pl_profile_start("getting_data");
00899         train_set->getExample(sample, input, target, example_weights[b]);
00900         //Profiler::pl_profile_end("getting_data");
00901         if (b == minibatch_size - 1 || i == my_stage_incr - 1 )
00902         {
00903             // Read the current stage value (will be used to compute the
00904             // current learning rate).
00905             int cur_stage = params_int_ptr[stage_idx];
00906             PLASSERT( cur_stage >= 0 );
00907             // Note that we should actually call onlineStep only on the subset
00908             // of samples that are new (compared to the previous mini-batch).
00909             // This is left as a TODO since it is not a priority.
00910             /*
00911             string samples_str = tostring(samples);
00912             printf("CPU %d computing (cur_stage = %d) on samples: %s\n",
00913                     iam, cur_stage, samples_str.c_str());
00914                     */
00915             onlineStep(cur_stage, targets, train_costs, example_weights );
00916             n_minibatches_per_update++;
00917             /*
00918             pout << "CPU " << iam << ": n_minibatches_per_update = "
00919                  << n_minibatches_per_update << endl;
00920                  */
00921             /*
00922             sleep(iam);
00923             string update = tostring(params_update);
00924             printf("\nCPU %d's current update: %s\n", iam, update.c_str());
00925             */
00926             nsteps += b + 1;
00927             /*
00928             for (int i=0;i<minibatch_size;i++)
00929             {
00930                 costs << train_costs(b);
00931                 train_stats->update( costs_plus_time );
00932             }
00933             */
00934             // Update weights if it is this cpu's turn.
00935             bool performed_update = false; // True when this CPU has updated.
00936             while (true) {
00937             int sem_value = semctl(semaphore_id, 0, GETVAL);
00938             if (sem_value == iam) {
00939                 int n_ready = 0;
00940                 if (synchronize_update && !performed_update) {
00941                     // We first indicate that this CPU is ready to perform its
00942                     // update.
00943                     n_ready = semctl(semaphore_id, ncpus + 1, GETVAL);
00944                     n_ready++;
00945                     semun_v.val = n_ready;
00946                     int success = semctl(semaphore_id, ncpus + 1, SETVAL,
00947                                          semun_v);
00948                     PLCHECK( success == 0 );
00949                 }
00950                 if (delayed_update && (!synchronize_update ||
00951                                        (!performed_update && n_ready > ncpus)))
00952                 {
00953                     // Once all CPUs are ready we can actually perform the
00954                     // updates.
00955                     //printf("CPU %d updating (nsteps = %d)\n", iam, nsteps);
00956                     all_params += params_update;
00957                     //params_update += all_params;
00958                     params_update.clear();
00959                     nmbpu_stats.update(real(n_minibatches_per_update));
00960                     n_minibatches_per_update = 0;
00961                     performed_update = true;
00962                 }
00963                 if (nsteps > 0) {
00964                     // Update the current stage.
00965                     cur_stage = params_int_ptr[stage_idx];
00966                     PLASSERT( cur_stage >= 0 );
00967                     int new_stage = cur_stage + nsteps;
00968                     params_int_ptr[stage_idx] = new_stage;
00969                     nsteps = 0;
00970                 }
00971                 if (n_ready == 2 * ncpus) {
00972                     // The last CPU has updated the parameters. All CPUs can
00973                     // now break out of this loop.
00974                     n_ready = semun_v.val = 0;
00975                     int success = semctl(semaphore_id, ncpus + 1, SETVAL,
00976                                          semun_v);
00977                     PLCHECK( success == 0 );
00978                 }
00979                 // Give update token to next CPU.
00980                 sem_value = (sem_value + 1) % ncpus;
00981                 semun_v.val = sem_value;
00982                 int success = semctl(semaphore_id, 0, SETVAL, semun_v);
00983                 if (success != 0)
00984                     PLERROR("In NatGradSMPNNet::train - Could not update "
00985                             "semaphore with next CPU (errno = %d, returned "
00986                             "value = %d, set value = %d)", errno, success,
00987                             semun_v.val);
00988                 if (!delayed_update || n_ready == 0)
00989                     // If 'synchronize_update' is false this is always true.
00990                     // If 'synchronize_update' is true this means all CPUs have
00991                     // updated the parameters.
00992                     break;
00993             } else {
00994                 if (!synchronize_update)
00995                     // We do not wait our turn: instead we move on to the next
00996                     // minibatch.
00997                     break;
00998                 if (performed_update) {
00999                     // TODO We could break here by checking the 'n_ready'
01000                     // semaphore: once it is reset to zero everyone can exit at
01001                     // once without necessarily doing it in turn.
01002                 }
01003             }
01004             }
01005         }
01006         /*
01007         if (params_averaging_coeff!=1.0 && 
01008             b==minibatch_size-1 && 
01009             (stage+1)%(minibatch_size*params_averaging_freq)==0)
01010         {
01011             PLERROR("Not implemented for SMP");
01012             multiplyScaledAdd(all_params, 1-params_averaging_coeff,
01013                     params_averaging_coeff, all_mparams);
01014         }
01015         if( pb ) {
01016             PLERROR("Progress bar not implemented for SMP");
01017             pb->update( stage + 1 );
01018         }
01019         */
01020     }
01021 
01022 
01023     if (iam == 0) {
01024         //tmp_log << "Loop ended" << endl;
01025         //tmp_log.flush();
01026         Profiler::end("big_loop");
01027         ptimer->stopTimer("big_loop");
01028     }
01029 
01030     if (!wait_for_final_update) {
01031         if (nsteps >  0) {
01032             //printf("CPU %d final updating (nsteps =%d)\n", iam, nsteps);
01033             if (delayed_update) {
01034                 all_params += params_update;
01035                 params_update.clear();
01036             }
01037             // Note that the line below is not safe: if two CPUs are running it
01038             // at the same time, the number of stages may not be correct.
01039             params_int_ptr[stage_idx] += nsteps;
01040             nsteps = 0;
01041         }
01042         // Indicate this CPU is done.
01043         semun_v.val = 1;
01044         semctl(semaphore_id, iam + 1, SETVAL, semun_v);
01045         if (iam != 0) {
01046             // Exit additional processes after training.
01047             //printf("CPU %d exiting\n", iam);
01048             exit(0);
01049         }
01050     }
01051 
01052     //Profiler::reset("Synchronization");
01053     //Profiler::start("Synchronization");
01054 
01055     //tmp_log << "Synchronization" << endl;
01056     //tmp_log.flush();
01057 
01058     // Wait until it is our turn.
01059     bool displayed_stats = true;
01060     while (true) {
01061         int sem_value = semctl(semaphore_id, 0, GETVAL);
01062         if (sem_value == iam || iam == 0) {
01063             if (sem_value == iam && wait_for_final_update) {
01064 
01065                 // Display statistics for effective sizes of mini-batches.
01066                 if (!displayed_stats) {
01067                     pout << "CPU " << iam << ": " << endl
01068                         << " - mean  : " << nmbpu_stats.mean() << endl
01069                         << " - stderr: " << nmbpu_stats.stderror() << endl
01070                         << " - median: " << nmbpu_stats.pseudo_quantile(0.5) << endl;
01071                     displayed_stats = true;
01072                 }
01073 
01074                 if (nsteps >  0) {
01075                     //printf("CPU %d final updating (nsteps =%d)\n", iam, nsteps);
01076                     if (delayed_update) {
01077                         all_params += params_update;
01078                         params_update.clear();
01079                     }
01080                     params_int_ptr[stage_idx] += nsteps;
01081                     nsteps = 0;
01082                 }
01083                 // Indicate this CPU is done.
01084                 semun_v.val = 1;
01085                 semctl(semaphore_id, iam + 1, SETVAL, semun_v);
01086                 if (iam != 0) {
01087                     // Exit additional processes after training.
01088                     //printf("CPU %d exiting\n", iam);
01089                     exit(0);
01090                 }
01091             }
01092             PLASSERT( iam == 0 );
01093             if (semctl(semaphore_id, sem_value + 1, GETVAL) == 0) {
01094                 // The next process is not done yet: we need to wait.
01095 #if 0
01096                 printf("Main CPU (%d) still waiting on CPU %d\n", iam,
01097                         sem_value);
01098 #endif
01099                 continue;
01100             }
01101 
01102             // Check if all CPUs are done.
01103             bool finished = true;
01104             for (int i = 0; i < ncpus; i++) {
01105                 if (semctl(semaphore_id, i + 1, GETVAL) == 0) {
01106                     /*
01107                     printf("Main CPU still waiting on CPU %d (GETVAL => %d)\n",
01108                             i, semctl(semaphore_id, i + 1, GETVAL));
01109                             */
01110                     finished = false;
01111                     break;
01112                 }
01113             }
01114             if (finished) {
01115                 //printf("Main CPU ready to finish (all ready!)\n");
01116                 break;
01117             }
01118 
01119             // Next CPU!
01120             sem_value = (sem_value + 1) % ncpus;
01121             semun_v.val = sem_value;
01122             semctl(semaphore_id, 0, SETVAL, semun_v);
01123         }
01124     }
01125 
01126     //tmp_log << "Synchronized" << endl;
01127     //tmp_log.flush();
01128     //Profiler::end("Synchronization");
01129     /*
01130     const Profiler::Stats& synch_stats = Profiler::getStats("Synchronization");
01131     real synch_time = (synch_stats.user_duration + synch_stats.system_duration)
01132         / real(Profiler::ticksPerSecond());
01133     DBG_MODULE_LOG << "Synch time: " << synch_time << endl;
01134     */
01135 
01136     // Get current stage (for debug purpose).
01137     int cur_stage = params_int_ptr[stage_idx];
01138     PLASSERT( cur_stage >= 0 );
01139 
01140     // Free the semaphore's resources.
01141     if (semaphore_id >= 0) {
01142         int success = semctl(semaphore_id, 0, IPC_RMID);
01143         if (success < 0)
01144             PLERROR("In NatGradSMPNNet::train - Could not remove previous "
01145                     "semaphore (errno = %d)", errno);
01146         semaphore_id = -1;
01147     }
01148 
01149     //tmp_log << "Finishing stuff" << endl;
01150     //tmp_log.flush();
01151 
01152     // Update the learner's stage.
01153     stage = nstages;
01154     if (stage != cur_stage)
01155         PLWARNING("The target stage (%d) was not reached exactly (actual "
01156                 "stage: %d)", stage, cur_stage);
01157 
01158     Profiler::end("training");
01159     //Profiler::pl_profile_end("Totaltraining");
01160     /*
01161     if (verbosity>0)
01162         Profiler::report(cout);
01163         */
01164     const Profiler::Stats& stats = Profiler::getStats("training");
01165     const Profiler::Stats& big_loop_stats = Profiler::getStats("big_loop");
01166     costs.fill(MISSING_VALUE);
01167     real ticksPerSec = Profiler::ticksPerSecond();
01168     real cpu_time = (stats.user_duration+stats.system_duration)/ticksPerSec;
01169     cumulative_training_time += cpu_time;
01170     costs_plus_time[train_costs.width()] = cpu_time;
01171     costs_plus_time[train_costs.width()+1] = cumulative_training_time;
01172     costs_plus_time[train_costs.width()+2] =
01173         (big_loop_stats.user_duration + big_loop_stats.system_duration) /
01174         ticksPerSec;
01175     costs_plus_time[train_costs.width() + 3] = ptimer->getTimer("big_loop");
01176     train_stats->update( costs_plus_time );
01177     train_stats->finalize(); // finalize statistics for this epoch
01178 
01179     //tmp_log << "Done!" << endl;
01180     //tmp_log.flush();
01181 
01182     // profiling gradient correlation
01183     //if( g_corrprof )    {
01184     //    PLASSERT( corr_profiling_end <= nstages );
01185     //    g_corrprof->printAndReset();
01186     //    ng_corrprof->printAndReset();
01187     //}
01188 
01189 }
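
// [Note, not part of the original file] The 'costs_plus_time' vector filled at
// the end of train() lines up with getTrainCostNames() below: the first
// train_costs.width() entries are the test cost names, followed by
//     [ train_seconds, cum_train_seconds, big_loop_seconds_1, big_loop_seconds_2 ]
// i.e. the CPU time of this train() call, the cumulative CPU training time, and
// two measurements of the big inner loop (the Profiler "big_loop" statistics
// and the "big_loop" PTimer, respectively).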
01190 
01191 void NatGradSMPNNet::onlineStep(int cur_stage, const Mat& targets,
01192                              Mat& train_costs, Vec example_weights)
01193 {
01194     // The mean gradient over minibatch_size examples has less variance, so a larger learning rate can be afforded.
01195     // TODO Note that this scaling factor is disabled below to avoid confusion about
01196     // which learning rates are being used in experiments.
01197     real lrate = /*sqrt(real(minibatch_size))* */ init_lrate/(1 + cur_stage * lrate_decay);
01198     PLASSERT(targets.length()==minibatch_size && train_costs.length()==minibatch_size && example_weights.length()==minibatch_size);
01199     fpropNet(minibatch_size, true);
01200     fbpropLoss(neuron_outputs_per_layer[n_layers-1],targets,example_weights,train_costs);
01201     for (int i=n_layers-1;i>0;i--)
01202     {
01203         // Here neuron_gradients_per_layer[i] contains the gradient on the activations
01204         // (weighted sums), of shape (minibatch_size x layer_sizes[i]).
01205 
01206         Mat previous_neurons_gradient = neuron_gradients_per_layer[i-1];
01207         Mat next_neurons_gradient = neuron_gradients_per_layer[i];
01208         Mat previous_neurons_output = neuron_outputs_per_layer[i-1];
01209         real layer_lrate_factor = (i==n_layers-1)?output_layer_lrate_scale:1;
01210         if (self_adjusted_scaling_and_bias && i+1<n_layers-1)
01211             for (int k=0;k<minibatch_size;k++)
01212             {
01213                 Vec g=next_neurons_gradient(k);
01214                 g*=activations_scaling[i-1]; // pass gradient through scaling
01215             }
01216         if (input_size_lrate_normalization_power==-1)
01217             layer_lrate_factor /= sumsquare(neuron_extended_outputs_per_layer[i-1]);
01218         else if (input_size_lrate_normalization_power==-2)
01219             layer_lrate_factor /= sqrt(sumsquare(neuron_extended_outputs_per_layer[i-1]));
01220         else if (input_size_lrate_normalization_power!=0)
01221         {
01222             int fan_in = neuron_extended_outputs_per_layer[i-1].length();
01223             if (input_size_lrate_normalization_power==1)
01224                 layer_lrate_factor/=fan_in;
01225             else if (input_size_lrate_normalization_power==2)
01226                 layer_lrate_factor/=sqrt(real(fan_in));
01227             else layer_lrate_factor/=pow(fan_in,1.0/input_size_lrate_normalization_power);
01228         }
01229         // optionally correct the gradient on neurons using their covariance
01230         if (neurons_natgrad_template && neurons_natgrad_per_layer[i])
01231         {
01232             static Vec tmp;
01233             tmp.resize(layer_sizes[i]);
01234             for (int k=0;k<minibatch_size;k++)
01235             {
01236                 Vec g_k = next_neurons_gradient(k);
01237                 PLERROR("Not implemented (t not available)");
01238                 //(*neurons_natgrad_per_layer[i])(t-minibatch_size+1+k,g_k,tmp);
01239                 g_k << tmp;
01240             }
01241         }
01242         if (i>1) // compute gradient on previous layer
01243         {
01244             // propagate gradients
01245             //Profiler::pl_profile_start("ProducScaleAccOnlineStep");
01246             productScaleAcc(previous_neurons_gradient,next_neurons_gradient,false,
01247                             weights[i-1],false,1,0);
01248             //Profiler::pl_profile_end("ProducScaleAccOnlineStep");
01249             // propagate through tanh non-linearity
01250             for (int j=0;j<previous_neurons_gradient.length();j++)
01251             {
01252                 real* grad = previous_neurons_gradient[j];
01253                 real* out = previous_neurons_output[j];
01254                 for (int k=0;k<previous_neurons_gradient.width();k++,out++)
01255                     grad[k] *= (1 - *out * *out); // gradient through tanh derivative
01256             }
01257         }
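        // [Illustrative note, not part of the original file] The loop above is
        // the usual tanh backprop identity: with y = tanh(a) already stored in
        // 'previous_neurons_output', dC/da = dC/dy * (1 - y*y). A scalar
        // version of the same update:
        //
        //     inline real backprop_through_tanh(real grad_wrt_output, real tanh_output)
        //     {
        //         return grad_wrt_output * (1 - tanh_output * tanh_output);
        //     }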
01258         // compute gradient on parameters, possibly update them
01259         if (use_pvgrad)
01260         {
01261             PLERROR("In NatGradSMPNNet::onlineStep - 'use_pvgrad' is not supported in this implementation");
01262             productScaleAcc(layer_params_gradient[i-1],next_neurons_gradient,true,
01263                             neuron_extended_outputs_per_layer[i-1],false,1,0);
01264         }
01265         else if (full_natgrad || params_natgrad_template || params_natgrad_per_input_template) 
01266         {
01267 //alternate
01268             PLERROR("In NatGradSMPNNet::onlineStep - natural gradient updates are not supported here; use plain stochastic gradient instead");
01269             if( params_natgrad_per_input_template && i==1 ){ // parameters are transposed
01270                 productScaleAcc(layer_params_gradient[i-1],
01271                             neuron_extended_outputs_per_layer[i-1], true,
01272                             next_neurons_gradient, false, 
01273                             1, 0);
01274             }else{
01275                 productScaleAcc(layer_params_gradient[i-1],next_neurons_gradient,true,
01276                             neuron_extended_outputs_per_layer[i-1],false,1,0);
01277             }
01278             layer_params_gradient[i-1] *= 1.0/minibatch_size; // use the MEAN gradient
01279         } else {// just regular stochastic gradient
01280             // compute gradient on weights and update them in one go (more efficient)
01281             // mean gradient has less variance, can afford larger learning rate
01282             //Profiler::pl_profile_start("ProducScaleAccOnlineStep");
01283             if (delayed_update) {
01284                 // Store updates in 'layer_params_update'.
01285                 //layer_params_update[i - 1].fill(0);
01286                 productScaleAcc(layer_params_update[i - 1],
01287                         next_neurons_gradient, true,
01288                         neuron_extended_outputs_per_layer[i-1], false,
01289                         -layer_lrate_factor*lrate, 1);
01290             } else {
01291                 // Directly update the parameters.
01292                 productScaleAcc(layer_params[i-1],next_neurons_gradient,true,
01293                         neuron_extended_outputs_per_layer[i-1],false,
01294                         -layer_lrate_factor*lrate /* /minibatch_size */, 1);
01295             }
01296             //Profiler::pl_profile_end("ProducScaleAccOnlineStep");
01297         }
01298     }
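    // [Note, not part of the original file] productScaleAcc(R, A, tA, B, tB,
    // alpha, beta), as used throughout this file, computes
    // R = alpha * op(A) * op(B) + beta * R, where op() transposes its argument
    // when the corresponding flag is true. The two plain-SGD branches above
    // therefore amount to
    //     delayed: layer_params_update += -layer_lrate_factor*lrate * grad' * extended_inputs
    //     direct:  layer_params        += -layer_lrate_factor*lrate * grad' * extended_inputs
    // the delayed updates being accumulated locally and pushed to the shared
    // parameters later, while the direct version writes into the shared
    // parameters immediately.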
01299     if (use_pvgrad)
01300     {
01301         PLERROR("In NatGradSMPNNet::onlineStep - 'use_pvgrad' is not supported in this implementation");
01302         pvGradUpdate();
01303     }
01304     else if (full_natgrad)
01305     {
01306         PLERROR("Not implemented (t not available)");
01307         //(*full_natgrad)(t/minibatch_size,all_params_gradient,all_params_delta); // compute update direction by natural gradient
01308         if (output_layer_lrate_scale!=1.0)
01309             layer_params_delta[n_layers-2] *= output_layer_lrate_scale; // scale output layer's learning rate
01310         multiplyAcc(all_params,all_params_delta,-lrate); // update
01311         // Hack to apply batch gradient even in this case (used for profiling
01312         // the gradient correlations)
01313         //if (output_layer_lrate_scale!=1.0)
01314         //      layer_params_gradient[n_layers-2] *= output_layer_lrate_scale; // scale output layer's learning rate
01315         //  multiplyAcc(all_params,all_params_gradient,-lrate); // update
01316 
01317     } else if (params_natgrad_template || params_natgrad_per_input_template)
01318     {
01319         PLERROR("Not implemented (t not available)");
01320         for (int i=0;i<params_natgrad_per_group.length();i++)
01321         {
01322             //GradientCorrector& neuron_natgrad = *(params_natgrad_per_group[i]);
01323             //neuron_natgrad(t/minibatch_size,group_params_gradient[i],group_params_delta[i]); // compute update direction by natural gradient
01324         }
01325 //alternate
01326         if (output_layer_lrate_scale!=1.0)
01327             layer_params_delta[n_layers-2] *= output_layer_lrate_scale; // scale output layer's learning rate 
01328         multiplyAcc(all_params,all_params_delta,-lrate); // update
01329     }
01330 
01331     // profiling gradient correlation
01332     //if( (t>=corr_profiling_start) && (t<=corr_profiling_end) && g_corrprof )    {
01333     //    (*g_corrprof)(all_params_gradient);
01334     //    (*ng_corrprof)(all_params_delta);
01335     //}
01336 
01337     // Output layer L1 regularization
01338     if( output_layer_L1_penalty_factor != 0. )    {
01339         PLERROR("Not implemented");
01340         real L1_delta = lrate * output_layer_L1_penalty_factor;
01341         real* m_i = layer_params[n_layers-2].data();
01342 
01343         for(int i=0; i<layer_params[n_layers-2].length(); i++,m_i+=layer_params[n_layers-2].mod())  {
01344             for(int j=0; j<layer_params[n_layers-2].width(); j++)   {
01345                 if( m_i[j] > L1_delta )
01346                     m_i[j] -= L1_delta;
01347                 else if( m_i[j] < -L1_delta )
01348                     m_i[j] += L1_delta;
01349                 else
01350                     m_i[j] = 0.;
01351             }
01352         }
01353     }
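    // [Illustrative note, not part of the original file] The L1 block above is
    // a soft-thresholding step, w <- sign(w) * max(|w| - L1_delta, 0), with
    // L1_delta = lrate * output_layer_L1_penalty_factor. A scalar sketch:
    //
    //     inline real soft_threshold(real w, real delta)
    //     {
    //         if (w > delta)  return w - delta;
    //         if (w < -delta) return w + delta;
    //         return 0;
    //     }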
01354 
01355 }
01356 
01357 void NatGradSMPNNet::pvGradUpdate()
01358 {
01359     int n = all_params_gradient.length();
01360     if(pv_stepsizes.length()==0)
01361     {
01362         pv_stepsizes.resize(n);
01363         pv_stepsizes.fill(pv_initial_stepsize);
01364         pv_stepsigns.resize(n);
01365         pv_stepsigns.fill(true);
01366     }
01367     pv_gradstats->update(all_params_gradient);
01368     real pv_deceleration = 1.0/pv_acceleration;
01369     for(int k=0; k<n; k++)
01370     {
01371         StatsCollector& st = pv_gradstats->getStats(k);
01372         int n_samples = (int)st.nnonmissing(); // renamed to avoid shadowing the outer 'n'
01373         if(n_samples>pv_min_samples)
01374         {
01375             real m = st.mean();
01376             real e = st.stderror();
01377             real prob_pos = gauss_01_cum(m/e);
01378             real prob_neg = 1.-prob_pos;
01379             if(!pv_random_sample_step)
01380             {
01381                 if(prob_pos>=pv_required_confidence)
01382                 {
01383                     all_params[k] += pv_stepsizes[k];
01384                     pv_stepsizes[k] *= (pv_stepsigns[k]?pv_acceleration:pv_deceleration);
01385                     pv_stepsigns[k] = true;
01386                     st.forget();
01387                 }
01388                 else if(prob_neg>=pv_required_confidence)
01389                 {
01390                     all_params[k] -= pv_stepsizes[k];
01391                     pv_stepsizes[k] *= ((!pv_stepsigns[k])?pv_acceleration:pv_deceleration);
01392                     pv_stepsigns[k] = false;
01393                     st.forget();
01394                 }
01395             }
01396             else  // random sample update direction (sign)
01397             {
01398                 bool ispos = (random_gen->binomial_sample(prob_pos)>0);
01399                 if(ispos) // picked positive
01400                     all_params[k] += pv_stepsizes[k];
01401                 else  // picked negative
01402                     all_params[k] -= pv_stepsizes[k];
01403                 pv_stepsizes[k] *= (pv_stepsigns[k]==ispos) ?pv_acceleration :pv_deceleration;
01404                 pv_stepsigns[k] = ispos;
01405                 st.forget();
01406             }
01407         }
01408     }
01409 }
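
// [Illustrative note, not part of the original file] pvGradUpdate() treats the
// sign of each parameter's gradient as a small hypothesis test: with m and e
// the mean and standard error of the accumulated gradient samples, the
// confidence that the true gradient is positive is the standard normal CDF of
// m/e (gauss_01_cum above), and a step is taken only once that confidence
// exceeds pv_required_confidence. A self-contained sketch of the confidence
// computation, using only <cmath>:
//
//     #include <cmath>
//     double prob_positive(double mean, double stderr_)
//     {
//         // standard normal CDF evaluated at mean/stderr_
//         return 0.5 * std::erfc(-(mean / stderr_) / std::sqrt(2.0));
//     }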
01410 
01411 void NatGradSMPNNet::computeOutput(const Vec& input, Vec& output) const
01412 {
01413     /*
01414     static int out_idx = -1;
01415     out_idx = (out_idx + 1) % 50;
01416     PStream out_log_file = openFile("/u/delallea/tmp/out_log_" +
01417             tostring(out_idx), PStream::raw_ascii, "w");
01418     out_log_file << "Starting to compute output on " << input << endl;
01419     out_log_file.flush();
01420     */
01421     //Profiler::pl_profile_start("computeOutput");
01422     neuron_outputs_per_layer[0](0) << input;
01423     fpropNet(1,false);
01424     output << neuron_outputs_per_layer[n_layers-1](0);
01425     //Profiler::pl_profile_end("computeOutput");
01426     /*
01427     out_log_file << "Output computed" << endl;
01428     out_log_file.flush();
01429     */
01430 }
01431 
01433 void NatGradSMPNNet::fpropNet(int n_examples, bool during_training) const
01434 {
01435     PLASSERT_MSG(n_examples<=minibatch_size,"NatGradSMPNNet::fpropNet: nb input vectors treated should be <= minibatch_size\n");
01436     for (int i=0;i<n_layers-1;i++)
01437     {
01438         Mat prev_layer = (self_adjusted_scaling_and_bias && i+1<n_layers-1)?
01439             neuron_outputs_per_layer[i]:neuron_extended_outputs_per_layer[i];
01440         Mat next_layer = neuron_outputs_per_layer[i+1];
01441         if (n_examples!=minibatch_size)
01442         {
01443             prev_layer = prev_layer.subMatRows(0,n_examples);
01444             next_layer = next_layer.subMatRows(0,n_examples);
01445         }
01446 //alternate
01447         // Are the input weights transposed? (because of ...)
01448         bool tw = true;
01449         if( params_natgrad_per_input_template && i==0 )
01450             tw = false;
01451 
01452         // try to use BLAS for the expensive operation
01453         if (self_adjusted_scaling_and_bias && i+1<n_layers-1){
01454             productScaleAcc(next_layer, prev_layer, false, 
01455                             (during_training || params_averaging_coeff==1.0)?
01456                             weights[i]:mweights[i], 
01457                             tw, 1, 0);
01458         }else{
01459             productScaleAcc(next_layer, prev_layer, false, 
01460                             (during_training || params_averaging_coeff==1.0)?
01461                             layer_params[i]:layer_mparams[i], 
01462                             tw, 1, 0);
01463         }
01464         // compute layer's output non-linearity
01465         if (i+1<n_layers-1)
01466             for (int k=0;k<n_examples;k++)
01467             {
01468                 Vec L=next_layer(k);
01469                 if (self_adjusted_scaling_and_bias)
01470                 {
01471                     real* m=mean_activations[i].data();
01472                     real* v=var_activations[i].data();
01473                     real* a=L.data();
01474                     real* s=activations_scaling[i].data();
01475                     real* b=biases[i].data(); // biases[i] is a 1-column matrix
01476                     int bmod = biases[i].mod();
01477                     for (int j=0;j<layer_sizes[i+1];j++,b+=bmod,m++,v++,a++,s++)
01478                     {
01479                         if (during_training)
01480                         {
01481                             real diff = *a - *m;
01482                             *v = (1-activation_statistics_moving_average_coefficient) * *v
01483                                 + activation_statistics_moving_average_coefficient * diff*diff;
01484                             *m = (1-activation_statistics_moving_average_coefficient) * *m
01485                                 + activation_statistics_moving_average_coefficient * *a;
01486                             *b = target_mean_activation - *m;
01487                             if (*v<100*target_stdev_activation*target_stdev_activation)
01488                                 *s = target_stdev_activation/sqrt(*v);
01489                             else // rescale the weights and the statistics for that neuron
01490                             {
01491                                 real rescale_factor = target_stdev_activation/sqrt(*v);
01492                                 Vec w = weights[i](j);
01493                                 w *= rescale_factor;
01494                                 *b *= rescale_factor;
01495                                 *s = 1;
01496                                 *m *= rescale_factor;
01497                                 *v *= rescale_factor*rescale_factor;
01498                             }
01499                         }
01500                         *a = tanh((*a + *b) * *s);
01501                     }
01502                 }
01503                 else{
01504                     compute_tanh(L,L);
01505                 }
01506             }
01507         else if (output_type=="NLL")
01508             for (int k=0;k<n_examples;k++)
01509             {
01510                 Vec L=next_layer(k);
01511                 log_softmax(L,L);
01512             }
01513         else if (output_type=="cross_entropy")  {
01514             for (int k=0;k<n_examples;k++)
01515             {
01516                 Vec L=next_layer(k);
01517                 log_sigmoid(L,L);
01518             }
01519          }
01520     }
01521 }
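
// [Note, not part of the original file] During training, the
// self_adjusted_scaling_and_bias branch of fpropNet() keeps exponential moving
// averages of each hidden unit's activation a,
//     m <- (1-c)*m + c*a,    v <- (1-c)*v + c*(a - m)^2,
// with c = activation_statistics_moving_average_coefficient, and then chooses
// the bias and scaling so the unit's tanh input has roughly the target
// statistics:
//     b = target_mean_activation - m,    s = target_stdev_activation / sqrt(v),
//     output = tanh((a + b) * s).
// When the estimated variance exceeds 100 times the target variance, the
// incoming weights and statistics are rescaled instead and s is reset to 1.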
01522 
01524 void NatGradSMPNNet::fbpropLoss(const Mat& output, const Mat& target, const Vec& example_weight, Mat& costs) const
01525 {
01526     int n_examples = output.length();
01527     Mat out_grad = neuron_gradients_per_layer[n_layers-1];
01528     if (n_examples!=minibatch_size)
01529         out_grad = out_grad.subMatRows(0,n_examples);
01530     if (output_type=="NLL")
01531     {
01532         for (int i=0;i<n_examples;i++)
01533         {
01534             int target_class = int(round(target(i,0)));
01535             Vec outp = output(i);
01536             Vec grad = out_grad(i);
01537             exp(outp,grad); // map log-prob to prob
01538             costs(i,0) = -outp[target_class];
01539             costs(i,1) = (target_class == argmax(outp))?0:1;
01540             grad[target_class]-=1;
01541             if (example_weight[i]!=1.0)
01542                 costs(i,0) *= example_weight[i];
01543         }
01544     }
01545     else if(output_type=="cross_entropy")   {
01546         for (int i=0;i<n_examples;i++)
01547         {
01548             int target_class = int(round(target(i,0)));
01549             Vec outp = output(i);
01550             Vec grad = out_grad(i);
01551             exp(outp,grad); // map log-prob to prob
01552             if( target_class == 1 ) {
01553                 costs(i,0) = - outp[0];
01554                 costs(i,1) = (grad[0]>0.5)?0:1;
01555             }   else    {
01556                 costs(i,0) = - pl_log( 1.0 - grad[0] );
01557                 costs(i,1) = (grad[0]>0.5)?1:0;
01558             }
01559             grad[0] -= (real)target_class;
01560             if (example_weight[i]!=1.0)
01561                 costs(i,0) *= example_weight[i];
01562         }
01563 //cout << "costs\t" << costs(0) << endl;
01564 //cout << "gradient\t" << out_grad(0) << endl;
01565 
01566     }
01567     else // if (output_type=="MSE")
01568     {
01569         substract(output,target,out_grad);
01570         for (int i=0;i<n_examples;i++)
01571         {
01572             costs(i,0) = pownorm(out_grad(i));
01573             if (example_weight[i]!=1.0)
01574             {
01575                 out_grad(i) *= example_weight[i];
01576                 costs(i,0) *= example_weight[i];
01577             }
01578         }
01579     }
01580 }
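
// [Illustrative note, not part of the original file] In the NLL branch of
// fbpropLoss(), the outputs are log-softmax values, so the gradient on the
// pre-softmax activations is softmax(a) - onehot(target), which is exactly
// what 'exp(outp, grad); grad[target_class] -= 1;' computes. A minimal
// stand-alone sketch with hypothetical names:
//
//     #include <vector>
//     #include <cmath>
//     void nll_gradient(const std::vector<double>& log_probs, int target,
//                       std::vector<double>& grad)
//     {
//         grad.resize(log_probs.size());
//         for (size_t j = 0; j < log_probs.size(); ++j)
//             grad[j] = std::exp(log_probs[j]);   // back to probabilities
//         grad[target] -= 1.0;                    // subtract the one-hot target
//     }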
01581 
01582 void NatGradSMPNNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
01583                                            const Vec& target, Vec& costs) const
01584 {
01585     Vec w(1);
01586     w[0]=1;
01587     Mat outputM = output.toMat(1,output.length());
01588     Mat targetM = target.toMat(1,target.length()); // use the target's own length, not the output's
01589     Mat costsM = costs.toMat(1,costs.length());
01590     fbpropLoss(outputM,targetM,w,costsM);
01591 }
01592 
01593 TVec<string> NatGradSMPNNet::getTestCostNames() const
01594 {
01595     TVec<string> costs;
01596     if (output_type=="NLL")
01597     {
01598         costs.resize(2);
01599         costs[0]="NLL";
01600         costs[1]="class_error";
01601     }
01602     else if (output_type=="cross_entropy")  {
01603         costs.resize(2);
01604         costs[0]="cross_entropy";
01605         costs[1]="class_error";
01606     }
01607     else if (output_type=="MSE")
01608     {
01609         costs.resize(1);
01610         costs[0]="MSE";
01611     }
01612     return costs;
01613 }
01614 
01615 TVec<string> NatGradSMPNNet::getTrainCostNames() const
01616 {
01617     TVec<string> costs = getTestCostNames();
01618     costs.append("train_seconds");
01619     costs.append("cum_train_seconds");
01620     costs.append("big_loop_seconds_1");
01621     costs.append("big_loop_seconds_2");
01622     return costs;
01623 }
01624 
01625 NatGradSMPNNet::~NatGradSMPNNet()
01626 {
01627     freeSharedMemory();
01628     if (semaphore_id >= 0) {
01629         int success = semctl(semaphore_id, 0, IPC_RMID);
01630         if (success < 0)
01631             PLERROR("In NatGradSMPNNet::~NatGradSMPNNet - Could not remove "
01632                     "semaphore (errno = %d)", errno);
01633         semaphore_id = -1;
01634     }
01635 }
01636 
01637 } // end of namespace PLearn
01638 
01639 
01640 /*
01641   Local Variables:
01642   mode:c++
01643   c-basic-offset:4
01644   c-file-style:"stroustrup"
01645   c-file-offsets:((innamespace . 0)(inline-open . 0))
01646   indent-tabs-mode:nil
01647   fill-column:79
01648   End:
01649 */
01650 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :