// -*- C++ -*-

// NatGradSMPNNet.h
//
// Copyright (C) 2007 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Yoshua Bengio

#ifndef NatGradSMPNNet_INC
#define NatGradSMPNNet_INC

#include <plearn_learners/generic/PLearner.h>
#include <plearn_learners/generic/GradientCorrector.h>
#include <plearn/sys/Profiler.h>
//#include "CorrelationProfiler.h"

namespace PLearn {

class NatGradSMPNNet : public PLearner
{
    typedef PLearner inherited;

public:
    //#####  Public Build Options  ############################################

    bool delayed_update;
    bool wait_for_final_update;
    bool synchronize_update;

    int noutputs;

    TVec<int> hidden_layer_sizes;

    TVec<Mat> layer_params;
    TVec<Mat> layer_mparams;

    real params_averaging_coeff;
    int params_averaging_freq;

    real init_lrate;

    real lrate_decay;

    real output_layer_L1_penalty_factor;

    real output_layer_lrate_scale;

    int minibatch_size;

    PP<GradientCorrector> neurons_natgrad_template;
    TVec<PP<GradientCorrector> > neurons_natgrad_per_layer;

    PP<GradientCorrector> params_natgrad_template;
    PP<GradientCorrector> params_natgrad_per_input_template;

    TVec<PP<GradientCorrector> > params_natgrad_per_group;

    PP<GradientCorrector> full_natgrad;

    string output_type;

    real input_size_lrate_normalization_power;

    real lrate_scale_factor;
    int lrate_scale_factor_max_power;
    int lrate_scale_factor_min_power;

    bool self_adjusted_scaling_and_bias;
    real target_mean_activation;
    real target_stdev_activation;
    // The mean and variance of the activations are estimated by a moving
    // average with this coefficient (near 0 for very slow averaging).
    real activation_statistics_moving_average_coefficient;
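    // Assumed form of that moving average (an illustrative sketch only, not
    // taken from the .cc file): with c = activation_statistics_moving_average_coefficient
    // and a the current activation of a neuron,
    //   mean_activation <- (1 - c) * mean_activation + c * a
    //   var_activation  <- (1 - c) * var_activation  + c * (a - mean_activation)^2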

    int verbosity;

    //int corr_profiling_start, corr_profiling_end;

public:
    //*************************************************************
    //*** Members used for Pascal Vincent's gradient technique  ***

    bool use_pvgrad;

    real pv_initial_stepsize;

    real pv_acceleration;

    int pv_min_samples;

    real pv_required_confidence;

    // Whether to randomly sample the sign of the update step for each
    // parameter based on the estimated probability of it being positive or
    // negative.
    bool pv_random_sample_step;


protected:
    PP<VecStatsCollector> pv_gradstats;

    Vec pv_stepsizes;

    TVec<bool> pv_stepsigns;

public:
    //#####  Public Member Functions  #########################################

    NatGradSMPNNet();

    virtual ~NatGradSMPNNet();

    //#####  PLearner Member Functions  #######################################

    // (PLEASE IMPLEMENT IN .cc)
    virtual int outputsize() const;

    // (PLEASE IMPLEMENT IN .cc)
    virtual void forget();

    // (PLEASE IMPLEMENT IN .cc)
    virtual void train();

    // (PLEASE IMPLEMENT IN .cc)
    virtual void computeOutput(const Vec& input, Vec& output) const;

    // (PLEASE IMPLEMENT IN .cc)
    virtual void computeCostsFromOutputs(const Vec& input, const Vec& output,
                                         const Vec& target, Vec& costs) const;

    // (PLEASE IMPLEMENT IN .cc)
    virtual TVec<std::string> getTestCostNames() const;

    // (PLEASE IMPLEMENT IN .cc)
    virtual TVec<std::string> getTrainCostNames() const;


    // *** SUBCLASS WRITING: ***
    // While in general not necessary, for particular needs (e.g. efficiency
    // concerns) you may also want to override some of the following methods:
    // virtual void computeOutputAndCosts(const Vec& input, const Vec& target,
    //                                    Vec& output, Vec& costs) const;
    // virtual void computeCostsOnly(const Vec& input, const Vec& target,
    //                               Vec& costs) const;
    // virtual void test(VMat testset, PP<VecStatsCollector> test_stats,
    //                   VMat testoutputs=0, VMat testcosts=0) const;
    // virtual int nTestCosts() const;
    // virtual int nTrainCosts() const;
    // virtual void resetInternalState();
    // virtual bool isStatefulLearner() const;


    //#####  PLearn::Object Protocol  #########################################

    // Declares other standard object methods.
    // ### If your class is not instantiatable (it has pure virtual methods)
    // ### you should replace this by PLEARN_DECLARE_ABSTRACT_OBJECT_METHODS
    PLEARN_DECLARE_OBJECT(NatGradSMPNNet);

    // Simply calls inherited::build() then build_()
    virtual void build();

    // (PLEASE IMPLEMENT IN .cc)
    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);

protected:
    //#####  Protected Options  ###############################################

    // ### Declare protected option fields (such as learned parameters) here

    int n_layers;

    TVec<int> layer_sizes;

    TVec<Mat> biases;
    TVec<Mat> weights, mweights;
    TVec<Vec> activations_scaling; // output = tanh(activations_scaling[layer][neuron] * (biases[layer][neuron] + weights[layer]*input[layer-1]))
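    // Expanded, per-neuron reading of the formula above (illustrative only;
    // the index conventions are assumptions, not taken from the .cc file):
    //   activation = biases[layer][neuron]
    //                + sum_j weights[layer](neuron, j) * output[layer-1][j]
    //   output[layer][neuron] = tanh(activations_scaling[layer][neuron] * activation)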
    TVec<Vec> mean_activations;
    TVec<Vec> var_activations;
    real cumulative_training_time;

protected:
    //#####  Protected Member Functions  ######################################

    static void declareOptions(OptionList& ol);

    static void declareMethods(RemoteMethodMap& rmm);

    void onlineStep(int t, const Mat& targets, Mat& train_costs, Vec example_weights);

    void fpropNet(int n_examples, bool during_training) const;

    void fbpropLoss(const Mat& output, const Mat& target, const Vec& example_weights, Mat& train_costs) const;

    void pvGradUpdate();

private:
    //#####  Private Member Functions  ########################################

    // (PLEASE IMPLEMENT IN .cc)
    void build_();

private:
    //#####  Private Data Members  ############################################

    // The rest of the private stuff goes here

    Vec all_params; // all the parameters in one vector
    Vec all_params_delta; // update direction
    Vec all_params_gradient; // all the parameter gradients in one vector
    Vec all_mparams; // mean parameters (moving-averaged over past values)
    TVec<Mat> layer_params_gradient;
    TVec<Vec> layer_params_delta;
    TVec<Vec> group_params; // params of each group (pointing into all_params)
    TVec<Vec> group_params_delta; // params_delta of each group (pointing into all_params_delta)
    TVec<Vec> group_params_gradient; // params_gradient of each group (pointing into all_params_gradient)
    Mat neuron_gradients; // one row per example of a minibatch, has concatenation of layer 0, layer 1, ... gradients.
    TVec<Mat> neuron_gradients_per_layer; // pointing into neuron_gradients (one row per example of a minibatch)
    mutable TVec<Mat> neuron_outputs_per_layer;  // same structure
    mutable TVec<Mat> neuron_extended_outputs_per_layer;  // with 1's in the first pseudo-neuron, for doing biases
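    // Layout sketch for the buffers above (inferred from their comments; the
    // exact slicing call is an assumption, not taken from the .cc file):
    // neuron_gradients has minibatch_size rows and one column per hidden or
    // output neuron, and each neuron_gradients_per_layer[l] is presumably a
    // column-range view into it, e.g. something like
    //   neuron_gradients.subMatColumns(start_of_layer_l, layer_sizes[l+1]);
    // where start_of_layer_l is an illustrative offset.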
    Mat targets; // one target row per example in a minibatch
    Vec example_weights; // one element per example in a minibatch
    Mat train_costs; // one row per example in a minibatch

    real* params_ptr;       // Raw pointer to the (shared) parameters.
    int params_id;          // Shared memory id for parameters.
    int* params_int_ptr;    // Raw pointer to the (shared) integer parameters.
    int params_int_id;      // Shared memory id for integer parameters.

    int nsteps;

    int semaphore_id;

    Vec params_update;

    TVec<Mat> layer_params_update;

    //PP<CorrelationProfiler> g_corrprof, ng_corrprof;    // for optional gradient correlation profiling

    void freeSharedMemory();
};

// Declares a few other classes and functions related to this class
DECLARE_OBJECT_PTR(NatGradSMPNNet);
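// Usage sketch (illustrative only: the option values below are hypothetical,
// and the generic calls (setTrainingSet, inputsize, ...) are assumed from the
// PLearner base class rather than from this file):
//
//     PP<NatGradSMPNNet> net = new NatGradSMPNNet();
//     net->noutputs = 10;                          // e.g. a 10-class problem
//     net->hidden_layer_sizes = TVec<int>(2, 100); // two hidden layers of 100 units
//     net->init_lrate = 0.01;
//     net->minibatch_size = 32;
//     net->build();                                // finalize the object after setting options
//     net->setTrainingSet(trainset);               // trainset: a VMat of input+target rows
//     net->train();                                // run the training stages
//
//     Vec output(net->outputsize());
//     net->computeOutput(input, output);           // input: a Vec of inputsize() values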

} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :