// -*- C++ -*-

// DeepNNet.h
//
// Copyright (C) 2005 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id: DeepNNet.h 3994 2005-08-25 13:35:03Z chapados $
   ******************************************************* */

// Authors: Yoshua Bengio

#ifndef DeepNNet_INC
#define DeepNNet_INC

#include <plearn_learners/generic/PLearner.h>

namespace PLearn {

class DeepNNet: public PLearner
{

private:

    typedef PLearner inherited;

protected:

    // *********************
    // * protected options *
    // *********************

    // ### declare protected option fields (such as learnt parameters) here

    TVec<TVec<TVec<int> > > sources;  // at [l][i], indices of the inputs to neuron i of layer l
    TVec<TVec<Vec> > weights;         // at [l][i], input weight vector of neuron i of layer l
    TVec<Vec> biases;                 // at [l][i], bias of neuron i of layer l
    Vec layerwise_lr_factor;          // at [l], multiplicative factor on the learning rate for layer l's weights
    real training_time;

    // temporary buffers
    mutable TVec<Vec> activations;    // at [l], output of the non-linearity of layer l, including the input AND the output layer
    TVec<Vec> activations_gradients;  // gradients of the above (for hidden and output layers, NOT the input layer)
    TVec<Mat> avg_weight_gradients;   // at [l], average norm of the gradients on all existing and potential connections
    Vec layerwise_gradient_norm_ma;   // at [l], moving average of the norm of the weight gradient on that layer
    Vec layerwise_gradient_norm;      // at [l], sum of the squared weight gradients on that layer
    TVec<int> n_weights_of_layer;     // number of weights in layer l
    real learning_rate;
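
    // How these fields presumably interact (a sketch only; the actual
    // logic lives in DeepNNet.cc): when layerwise_learning_rate_adaptation
    // is positive, the squared weight gradients accumulated in
    // layerwise_gradient_norm are smoothed into layerwise_gradient_norm_ma,
    // from which layerwise_lr_factor[l] is derived, so that the update on a
    // weight of layer l uses a step of roughly
    //     learning_rate * layerwise_lr_factor[l] * gradient.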

public:

    // ************************
    // * public build options *
    // ************************

    // ### declare public option fields (such as build options) here

    int n_layers;                          // counts the output layer but not the input layer
    int n_outputs;
    int default_n_units_per_hidden_layer;  // optionally used to initialize n_units_per_layer
    TVec<int> n_units_per_layer;
    real L1_regularizer;                   // amount of penalty on sum_{l,i,j} |weights[l][i][j]|
    real initial_learning_rate;
    real learning_rate_decay;
    real layerwise_learning_rate_adaptation;
    bool normalize_per_unit;
    bool normalize_percentage;
    bool normalize_activations;
    string output_cost;       // implies a non-linearity for the outputs: "mse" -> linear, "nll" -> softmax
    bool add_connections;     // if true, instantiate the potential connections whose average
                              // gradient is greater than that of the existing connections with
                              // the smallest average gradient
    bool remove_connections;  // remove the weakest existing connections (smallest absolute value)
    real initial_sparsity;    // initial fraction of weights that are 0
    int connections_adaptation_frequency;  // after how many examples do we try to adapt connections? 0 = train set size
    real init_scale;          // scaling factor of the weight initialization
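
    // Sketch of the connection-adaptation options above (an assumption;
    // see DeepNNet.cc for the actual rule): every
    // connections_adaptation_frequency examples (or once per training-set
    // pass when it is 0), avg_weight_gradients is presumably consulted so
    // that, with remove_connections, the existing connections of smallest
    // |weight| are dropped and, with add_connections, the potential
    // connections whose average gradient exceeds that of the weakest
    // surviving ones are instantiated in their place.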

    // ****************
    // * Constructors *
    // ****************

    //! Default constructor.
    // (Make sure the implementation in the .cc
    // initializes all fields to reasonable default values)
    DeepNNet();


    // ********************
    // * PLearner methods *
    // ********************

private:

    //! This does the actual building.
    // (Please implement in .cc)
    void build_();

protected:

    //! Declares this class' options.
    // (Please implement in .cc)
    static void declareOptions(OptionList& ol);

public:

    // ************************
    // **** Object methods ****
    // ************************

    //! Simply calls inherited::build() then build_().
    virtual void build();

    //! Transforms a shallow copy into a deep copy.
    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);

    // Declares other standard object methods.
    // If your class is not instantiatable (i.e. it has pure virtual
    // methods), replace this by PLEARN_DECLARE_ABSTRACT_OBJECT.
    PLEARN_DECLARE_OBJECT(DeepNNet);


    // **************************
    // **** PLearner methods ****
    // **************************

    //! Returns the size of this learner's output (which typically
    //! depends on its inputsize(), targetsize() and set options).
    // (PLEASE IMPLEMENT IN .cc)
    virtual int outputsize() const;

    //! (Re-)initializes the learner in its fresh state (which may depend
    //! on the 'seed' option) and sets 'stage' back to 0.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void forget();

    //! Brings the learner up to stage == nstages, updating the
    //! train_stats collector with training costs measured on-line.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void train();

    //! Computes the output from the input.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void computeOutput(const Vec& input, Vec& output) const;

    //! Computes the costs from an already computed output.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void computeCostsFromOutputs(const Vec& input, const Vec& output,
                                         const Vec& target, Vec& costs) const;

    //! Returns the names of the costs computed by computeCostsFromOutputs
    //! (and thus by the test method).
    // (PLEASE IMPLEMENT IN .cc)
    virtual TVec<std::string> getTestCostNames() const;

    //! Returns the names of the objective costs that the train method
    //! computes and for which it updates the VecStatsCollector train_stats.
    // (PLEASE IMPLEMENT IN .cc)
    virtual TVec<std::string> getTrainCostNames() const;


    // *** SUBCLASS WRITING: ***
    // While in general not necessary, in case of particular needs
    // (e.g. efficiency concerns) you may also want to overload
    // some of the following methods:
    // virtual void computeOutputAndCosts(const Vec& input, const Vec& target, Vec& output, Vec& costs) const;
    // virtual void computeCostsOnly(const Vec& input, const Vec& target, Vec& costs) const;
    // virtual void test(VMat testset, PP<VecStatsCollector> test_stats, VMat testoutputs=0, VMat testcosts=0) const;
    // virtual int nTestCosts() const;
    // virtual int nTrainCosts() const;
    // virtual void resetInternalState();
    // virtual bool isStatefulLearner() const;

    //! Propagates activations from activations[0] to activations[n_layers].
    void fprop() const;

    //! Initializes with random connectivity, random weights and zero biases.
    void initializeParams(bool set_seed = true);
};

// Declares a few other classes and functions related to this class.
DECLARE_OBJECT_PTR(DeepNNet);

} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
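
Since the header only declares the interface, the following is a minimal,
self-contained sketch of the sparse forward propagation that the
sources/weights/biases layout implies, using plain std::vector in place of
PLearn's TVec/Vec. The tanh squashing and the sparse_layer_fprop helper are
illustrative assumptions, not part of PLearn; only the indexing scheme
(weights[l][i][j] pairing with sources[l][i][j]) is taken from the field
comments above.

#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative stand-ins for PLearn's Vec / TVec containers (an assumption;
// the real code uses the PLearn types declared in the header above).
typedef std::vector<double> Layer;                // activations of one layer
typedef std::vector<std::vector<int> > IdxList;   // sources[i]: input indices of unit i
typedef std::vector<std::vector<double> > WList;  // weights[i][j] pairs with sources[i][j]

// One step of sparse forward propagation: unit i of the next layer reads
// only the previous-layer activations listed in sources[i].
Layer sparse_layer_fprop(const Layer& prev, const IdxList& sources,
                         const WList& weights, const Layer& biases,
                         bool linear_output)
{
    Layer next(sources.size());
    for (std::size_t i = 0; i < sources.size(); ++i) {
        double s = biases[i];
        for (std::size_t j = 0; j < sources[i].size(); ++j)
            s += weights[i][j] * prev[sources[i][j]];
        // Hidden layers get a squashing non-linearity (tanh here, an
        // assumption); with output_cost == "mse" the output layer is linear.
        next[i] = linear_output ? s : std::tanh(s);
    }
    return next;
}

DeepNNet::fprop() would presumably chain such a step for l = 0 .. n_layers-1,
keeping the output layer linear when output_cost == "mse" and applying a
softmax when it is "nll".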