// -*- C++ -*-

// NatGradNNet.h
//
// Copyright (C) 2007 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Yoshua Bengio

#ifndef NatGradNNet_INC
#define NatGradNNet_INC

#include <plearn_learners/generic/PLearner.h>
#include <plearn_learners/generic/GradientCorrector.h>
#include <plearn/sys/Profiler.h>
//#include "CorrelationProfiler.h" // *stat*

namespace PLearn {

//! Multi-layer feedforward neural network trained by (approximate)
//! natural-gradient stochastic gradient descent.
class NatGradNNet : public PLearner
{
    typedef PLearner inherited;

public:
    //##### Public Build Options ############################################

    //! Number of outputs of the network.
    int noutputs;

    //! Sizes of the hidden layers.
    TVec<int> hidden_layer_sizes;

    //! Parameters of each layer: one matrix of weights per layer, with the
    //! biases in the first column.
    TVec<Mat> layer_params;
    //! Moving-average (mean) version of the layer parameters.
    TVec<Mat> layer_mparams;

    //! Coefficient of the moving average used to maintain layer_mparams.
    real params_averaging_coeff;
    //! How often (in number of updates) layer_mparams is updated.
    int params_averaging_freq;

    //! Initial learning rate.
    real init_lrate;

    //! Learning rate decay factor.
    real lrate_decay;
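
    // NOTE (illustrative, not part of the original header): PLearn learners
    // commonly decay the learning rate as
    //     lrate(t) = init_lrate / (1 + t * lrate_decay),
    // where t counts examples seen; e.g. init_lrate = 0.01 and
    // lrate_decay = 1e-4 halve the rate after 10000 examples. Whether
    // NatGradNNet uses exactly this schedule is determined in the .cc file.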

    //! L1 penalty factor applied to the output layer's parameters.
    real output_layer_L1_penalty_factor;

    //! Scaling factor of the learning rate for the output layer.
    real output_layer_lrate_scale;

    //! Number of examples in each minibatch.
    int minibatch_size;

    //! Template GradientCorrector for the gradients on the neuron
    //! activations; copied to obtain one corrector per layer.
    PP<GradientCorrector> neurons_natgrad_template;
    TVec<PP<GradientCorrector> > neurons_natgrad_per_layer;

    //! Template GradientCorrector for the gradients on groups of
    //! parameters.
    PP<GradientCorrector> params_natgrad_template;
    //! Optional template corrector for the groups of parameters associated
    //! with each input.
    PP<GradientCorrector> params_natgrad_per_input_template;

    //! One GradientCorrector per group of parameters, obtained by copying
    //! the above templates.
    TVec<PP<GradientCorrector> > params_natgrad_per_group;

    //! Optional corrector applied to the gradient over all parameters at
    //! once.
    PP<GradientCorrector> full_natgrad;

    //! Type of output cost (e.g. "NLL" for classification, "MSE" for
    //! regression).
    string output_type;

    //! Power of the input size used to normalize the learning rate
    //! (0 = no normalization).
    real input_size_lrate_normalization_power;

    //! Per-layer learning-rate scaling: factors of the form
    //! lrate_scale_factor^k, with k ranging between the two powers below.
    real lrate_scale_factor;
    int lrate_scale_factor_max_power;
    int lrate_scale_factor_min_power;

    //! If true, each neuron adjusts the scaling and bias of its activations
    //! so that their mean and standard deviation approach the targets below.
    bool self_adjusted_scaling_and_bias;
    real target_mean_activation;
    real target_stdev_activation;
    // The mean and variance of the activations are estimated by a moving
    // average with this coefficient (near 0 for very slow averaging).
    real activation_statistics_moving_average_coefficient;
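
    // NOTE (illustrative, not part of the original header): a
    // GradientCorrector maps a raw gradient g to an update direction d; for
    // natural-gradient correctors, approximately d = C^{-1} g, where C is an
    // online estimate of the (Fisher-like) covariance of the gradients. The
    // *_template options above are prototypes, copied to obtain one
    // corrector per layer (neurons_natgrad_per_layer) or per parameter
    // group (params_natgrad_per_group); full_natgrad instead corrects the
    // gradient over all parameters jointly.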

    // *stat*
    // Temporary stuff for getting a clue as to what's going on.
    // Look for the marker '*stat*' in the code.

    // -Options-
    //int corr_profiling_start, corr_profiling_end;

    // -Not options-
    //PP<CorrelationProfiler> g_corrprof, ng_corrprof; // for optional gradient correlation profiling
    //real sum_gradient_norms; // holds the sum of the gradient norms - reset at each epoch
    //Vec all_params_cum_gradient; // holds the sum of the gradients - reset at each epoch

    //TVec<VecStatsCollector> pa_gradstats; // one VecStatsCollector per output class

    //TVec<int> pv_all_nsamples;
    //TVec< TMat<int> > pv_layer_nsamples;

    // *stat* end

public:
    //##### Public Member Functions #########################################

    NatGradNNet();

    //##### PLearner Member Functions #######################################

    //! Returns the size of this learner's output (which may depend on its
    //! inputsize(), targetsize() and set options).
    // (PLEASE IMPLEMENT IN .cc)
    virtual int outputsize() const;

    //! (Re-)initializes the PLearner in its fresh state (that state may
    //! depend on the 'seed' option) and sets 'stage' back to 0.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void forget();

    //! The role of the train method is to bring the learner up to
    //! stage == nstages, updating train_stats with training costs measured
    //! on-line in the process.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void train();

    //! Computes the output from the input.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void computeOutput(const Vec& input, Vec& output) const;
    virtual void computeOutputs(const Mat& input, Mat& output) const;

    virtual void computeOutputsAndCosts(const Mat& input, const Mat& target,
                                        Mat& output, Mat& costs) const;

    //! Computes the costs from an already computed output.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void computeCostsFromOutputs(const Vec& input, const Vec& output,
                                         const Vec& target, Vec& costs) const;

    //! Returns the names of the costs computed by computeCostsFromOutputs
    //! (and thus the test method).
    // (PLEASE IMPLEMENT IN .cc)
    virtual TVec<std::string> getTestCostNames() const;

    //! Returns the names of the objective costs that the train method
    //! computes and for which it updates the VecStatsCollector train_stats.
    // (PLEASE IMPLEMENT IN .cc)
    virtual TVec<std::string> getTrainCostNames() const;


    // *** SUBCLASS WRITING: ***
    // While in general not necessary, in case of particular needs
    // (efficiency concerns for ex) you may also want to overload
    // some of the following methods:
    // virtual void computeOutputAndCosts(const Vec& input, const Vec& target,
    //                                    Vec& output, Vec& costs) const;
    // virtual void computeCostsOnly(const Vec& input, const Vec& target,
    //                               Vec& costs) const;
    // virtual void test(VMat testset, PP<VecStatsCollector> test_stats,
    //                   VMat testoutputs=0, VMat testcosts=0) const;
    // virtual int nTestCosts() const;
    // virtual int nTrainCosts() const;
    // virtual void resetInternalState();
    // virtual bool isStatefulLearner() const;


    //##### PLearn::Object Protocol #########################################

    // Declares other standard object methods.
    // ### If your class is not instantiatable (it has pure virtual methods)
    // ### you should replace this by PLEARN_DECLARE_ABSTRACT_OBJECT_METHODS
    PLEARN_DECLARE_OBJECT(NatGradNNet);

    // Simply calls inherited::build() then build_()
    virtual void build();

    //! Transforms a shallow copy into a deep copy.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);

protected:
    //##### Protected Options ###############################################

    // ### Declare protected option fields (such as learned parameters) here

    //! Number of layers.
    int n_layers;

    //! Sizes of the layers (including input and output).
    TVec<int> layer_sizes;

    //! Per-layer biases and weights (mweights holds the moving-average weights).
    TVec<Mat> biases;
    TVec<Mat> weights, mweights;
    // output = tanh(activations_scaling[layer][neuron]
    //               * (biases[layer][neuron] + weights[layer]*input[layer-1]))
    TVec<Vec> activations_scaling;
    TVec<Vec> mean_activations;
    TVec<Vec> var_activations;
    real cumulative_training_time;

protected:
    //##### Protected Member Functions ######################################

    //! Declares the class options.
    // (PLEASE IMPLEMENT IN .cc)
    static void declareOptions(OptionList& ol);

    //! Performs one training step on a minibatch of examples.
    void onlineStep(int t, const Mat& targets, Mat& train_costs, Vec example_weights);

    //! Forward propagation through the network for the first n_examples of
    //! the minibatch buffers.
    void fpropNet(int n_examples, bool during_training) const;

    //! Computes the costs and the gradient of the loss with respect to the
    //! network output.
    void fbpropLoss(const Mat& output, const Mat& target, const Vec& example_weights, Mat& train_costs) const;


private:
    //##### Private Member Functions ########################################

    //! This does the actual building.
    // (PLEASE IMPLEMENT IN .cc)
    void build_();

private:
    //##### Private Data Members ############################################

    // The rest of the private stuff goes here

    Vec all_params;          // all the parameters in one vector
    Vec all_params_delta;    // update direction
    Vec all_params_gradient; // all the parameter gradients in one vector
    Vec all_mparams;         // mean parameters (moving-averaged over past values)
    TVec<Mat> layer_params_gradient;
    TVec<Vec> layer_params_delta;
    TVec<Vec> group_params;          // params of each group (pointing into all_params)
    TVec<Vec> group_params_delta;    // params_delta of each group (pointing into all_params_delta)
    TVec<Vec> group_params_gradient; // params_gradient of each group (pointing into all_params_gradient)
    Mat neuron_gradients; // one row per example of a minibatch, has concatenation of layer 0, layer 1, ... gradients
    TVec<Mat> neuron_gradients_per_layer; // pointing into neuron_gradients (one row per example of a minibatch)
    mutable TVec<Mat> neuron_outputs_per_layer;          // same structure
    mutable TVec<Mat> neuron_extended_outputs_per_layer; // with 1's in the first pseudo-neuron, for doing biases
    Mat targets;         // one target row per example in a minibatch
    Vec example_weights; // one element per example in a minibatch
    Mat train_costs;     // one row per example in a minibatch

};

// Declares a few other classes and functions related to this class
DECLARE_OBJECT_PTR(NatGradNNet);

} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
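
The activations_scaling comment in the protected options above fully specifies
the per-neuron computation. The following standalone sketch (plain C++ with no
PLearn dependencies; all names are local to the example and the numeric values
are made up) applies that formula to a single neuron:

#include <cmath>
#include <cstdio>
#include <vector>

// Standalone illustration (hypothetical names, not PLearn API) of
//   output = tanh(activations_scaling[layer][neuron]
//                 * (biases[layer][neuron] + weights[layer]*input[layer-1]))
// for one neuron of one layer.
int main()
{
    const std::vector<double> input   = { 0.5, -1.0,  2.0 }; // previous layer's output
    const std::vector<double> weights = { 0.1,  0.2, -0.3 }; // this neuron's incoming weights
    const double bias    = 0.05;
    const double scaling = 1.5; // plays the role of activations_scaling[layer][neuron]

    double preact = bias;                    // biases[layer][neuron]
    for (std::size_t i = 0; i < input.size(); ++i)
        preact += weights[i] * input[i];     // + weights[layer] * input[layer-1]

    const double output = std::tanh(scaling * preact);
    std::printf("pre-activation = %g, output = %g\n", preact, output);
    return 0;
}

In the class itself this computation is done for whole minibatches by
fpropNet(), with the bias folded into the weight matrix via
neuron_extended_outputs_per_layer (the 1's in the first pseudo-neuron).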