// -*- C++ -*-

// IncrementalNNet.h
//
// Copyright (C) 2005 Yoshua Bengio, Mantas Lukosevicius
// 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************      
 * $Id: IncrementalNNet.h 3994 2005-08-25 13:35:03Z chapados $ 
 ******************************************************* */

// Authors: Yoshua Bengio, Mantas Lukosevicius

#ifndef IncrementalNNet_INC
#define IncrementalNNet_INC

#include <plearn_learners/generic/PLearner.h>

namespace PLearn {

class IncrementalNNet: public PLearner
{

private:

    typedef PLearner inherited;

protected:

    // *********************
    // * protected options *
    // *********************

    // ### declare protected option fields (such as learnt parameters) here

    Mat direct_weights; // direct connections from input to output
    Mat direct_weight_gradients;
    Mat output_weights; // [hidden_unit, output] ** NOTE IT IS TRANSPOSED ** so that hidden units can easily be added
    Mat output_weight_gradients;
    Vec output_biases;
    Mat hidden_layer_weights; // [hidden_unit, input]
    Mat hidden_layer_weight_gradients; 
    Vec hidden_layer_biases; // [hidden_unit]
    TVec<Vec> internal_weights;    // among hidden units [to, from]. Enabled by enable_internal_weights.
    TVec<Vec> internal_weight_gradients;    
    Vec candidate_unit_weights;
    Vec candidate_unit_weight_gradients; 
    real candidate_unit_bias;
    Vec candidate_unit_output_weights;
    Vec candidate_unit_output_weight_gradients;
    Vec candidate_unit_internal_weights;
    Vec candidate_unit_internal_weight_gradients;
    int n_examples_seen;
    real current_average_cost;
    real next_average_cost;
    int n_examples_training_candidate;
    int current_example;
    
    real moving_average_coefficient; // = 1.0 / minibatch_size;
    real learning_rate; // = initial_learning_rate / ( 1 + n_examples_seen * decay_factor );
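    // For illustration (hypothetical values, not defaults): with
    // initial_learning_rate = 0.01 and decay_factor = 1e-5, this schedule gives
    // learning_rate = 0.01 at n_examples_seen = 0, 0.01/2 = 0.005 after 1e5
    // examples, and 0.01/3 = ~0.0033 after 2e5 examples (hyperbolic decay,
    // not exponential).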
public:

    // ************************
    // * public build options *
    // ************************

    // ### declare public option fields (such as build options) here

    int n_outputs;
    real output_weight_decay; // L1 regularizer's penalty factor on output weights
    bool online; // use the online or the batch version? If batch, only consider adding a hidden unit after every minibatch_size examples.
    // Add a hidden unit only if it would reduce the average cost (including the L1 penalty).
    // This average is calculated either with a moving average over a moving target (online version),
    // or the algorithm proceeds in two phases (batch version): on even batches one improves the
    // tentative hidden unit, while on odd batches one evaluates its quality.
    int minibatch_size; // see above; 0 is a special value meaning minibatch_size == training set size
    string output_cost_type; // "squared_error", "hinge_loss", "discrete_log_likelihood" (for probabilistic classification)
    bool boosting; // use a boosting-like approach (only works with online=false and minimize_local_cost=true): train the
    // new hidden unit but not the previous ones, and descend not on the actual cost but on a weighted cost obtained
    // from the gradient of the output cost on the hidden unit function (see the minimize_local_cost option).
    bool minimize_local_cost; // if true then, instead of minimizing the global cost sum_t Q(f(x_t),y_t),
    // each hidden unit minimizes sum_t Q'(f(x_t),y_t) h(x_t),
    // or some approximation of it if h is a hard threshold (weighted logistic regression cost
    // with targets sign(Q'(f(x_t),y_t)) and weights |Q'(f(x_t),y_t)|),
    // where Q is the output cost, f(x_t) is the current prediction, y_t the target, and h(x_t) the
    // output of the new hidden unit.
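    // For instance (a sketch of the single-output squared-error case, assuming
    // the convention Q(f,y) = 0.5*(f-y)^2, so that Q'(f,y) = f-y): the local
    // cost becomes sum_t (f(x_t)-y_t) h(x_t), i.e. the new unit is pushed to
    // anti-correlate with the current residual f(x_t)-y_t, in the spirit of
    // cascade-correlation / ConvexNN.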
    bool hard_activation_function; // if true then h(x) = sign(w'x + b), else h(x) = tanh(w'x + b)
    bool use_hinge_loss_for_hard_activation; // use hinge loss or cross-entropy to train hidden units, when hard_activation_function is true
    real initial_learning_rate; // learning_rate = initial_learning_rate / (1 + n_examples_seen * decay_factor);
    real decay_factor;
    real max_n_epochs_to_fail; // Maximum number of epochs (not necessarily an integer) to try improving the new hidden unit
    // before declaring failure to improve the regularized cost (and thus stopping training).
                             
    real rand_range;           // Interval of random numbers when initializing weights/biases: (-rand_range/2, rand_range/2).
    bool enable_internal_weights; // Network has a cascade topology if true, or one hidden layer if false (default).
    bool incremental_connections; // Add connections incrementally if true, or all at once if false (default). 
    // This option is only supported with n_outputs == 1. 
    real connection_gradient_threshold;    // Gradient threshold for a connection to be added, when incremental_connections == true.
    real connection_removing_threshold;   // Connections for which |weight| + |MAgradient| < connection_removing_threshold are removed.
    // Default value is 0 (connections are not removed). Enabled by incremental_connections.
    bool residual_correlation_gradient;  // Use residual correlation gradient (ConvexNN) if true, or classical error 
    // back-propagation if false.
  
    // ** NON-OPTION FIELDS
    //
    Vec linear_output; // output before possible output non-linearity = output_weights * h(x) + output_biases
    Vec act; // weighted sum of inputs on hidden units, before non-linearity
    Vec h; // output of hidden units after hidden unit non-linearity
    int cost_type; // 0 = squared_error, 1 = hinge_loss, 2 = discrete_log_likelihood

    // ****************
    // * Constructors *
    // ****************

    //! Default constructor.
    // (Make sure the implementation in the .cc
    // initializes all fields to reasonable default values)
    IncrementalNNet();


    // ********************
    // * PLearner methods *
    // ********************

private: 

    //! This does the actual building.
    // (Please implement in .cc)
    void build_();

protected: 
  
    //! Declares this class' options.
    // (Please implement in .cc)
    static void declareOptions(OptionList& ol);

public:

    // ************************
    // **** Object methods ****
    // ************************

    //! Simply calls inherited::build() then build_().
    virtual void build();

    //! Transforms a shallow copy into a deep copy.
    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);

    // Declares other standard object methods.
    // If your class is not instantiatable (it has pure virtual methods)
    // you should replace this by PLEARN_DECLARE_ABSTRACT_OBJECT.
    PLEARN_DECLARE_OBJECT(IncrementalNNet);


    // **************************
    // **** PLearner methods ****
    // **************************

    //! Returns the size of this learner's output (which typically
    //! may depend on its inputsize(), targetsize() and set options).
    // (PLEASE IMPLEMENT IN .cc)
    virtual int outputsize() const;

    //! (Re-)initializes the PLearner in its fresh state (that state may depend
    //! on the 'seed' option) and sets 'stage' back to 0 (this is the stage of
    //! a fresh learner!).
    // (PLEASE IMPLEMENT IN .cc)
    virtual void forget();


    //! The role of the train method is to bring the learner up to
    //! stage == nstages, updating the train_stats collector with training
    //! costs measured on-line in the process.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void train();


    //! Computes the output from the input.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void computeOutput(const Vec& input, Vec& output) const;

    //! Computes the costs from the already computed output.
    // (PLEASE IMPLEMENT IN .cc)
    virtual void computeCostsFromOutputs(const Vec& input, const Vec& output, 
                                         const Vec& target, Vec& costs) const;


    //! Returns the names of the costs computed by computeCostsFromOutputs
    //! (and thus the test method).
    // (PLEASE IMPLEMENT IN .cc)
    virtual TVec<std::string> getTestCostNames() const;

    //! Returns the names of the objective costs that the train method computes
    //! and for which it updates the VecStatsCollector train_stats.
    // (PLEASE IMPLEMENT IN .cc)
    virtual TVec<std::string> getTrainCostNames() const;


    // *** SUBCLASS WRITING: ***
    // While in general not necessary, in case of particular needs 
    // (efficiency concerns for ex) you may also want to overload
    // some of the following methods:
    // virtual void computeOutputAndCosts(const Vec& input, const Vec& target, Vec& output, Vec& costs) const;
    // virtual void computeCostsOnly(const Vec& input, const Vec& target, Vec& costs) const;
    // virtual void test(VMat testset, PP<VecStatsCollector> test_stats, VMat testoutputs=0, VMat testcosts=0) const;
    // virtual int nTestCosts() const;
    // virtual int nTrainCosts() const;
    // virtual void resetInternalState();
    // virtual bool isStatefulLearner() const;

    virtual real output_loss(const Vec& output, const Vec& target) const; // compute the output loss, according to output_cost_type

    // compute doutput_loss/doutput in output_gradient
    virtual void output_loss_gradient(const Vec& output, const Vec& target,
                                      Vec output_gradient, real sampleweight) const;
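    // For instance, with output_cost_type == "squared_error" (assuming the
    // convention Q(f,y) = 0.5*||f-y||^2), output_gradient would be filled
    // with sampleweight * (output - target).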
  
    // Update moving-average gradients on connections, add/remove some connections, and train the existing ones.
    // Works on the input connections of a single unit.
    void update_incremental_connections( Vec weights, Vec MAgradients, const Vec& input, real gradient ) const;
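
    // A minimal sketch of what the rule above could look like (a plausible
    // reading of the option descriptions, not necessarily the exact
    // implementation):
    //   for each input index i:
    //     if connection i is inactive and |MAgradients[i]| > connection_gradient_threshold:
    //       activate it, initializing weights[i] in (-rand_range/2, rand_range/2);
    //     else if |weights[i]| + |MAgradients[i]| < connection_removing_threshold:
    //       deactivate connection i (weights[i] = 0);
    //     else if connection i is active:
    //       weights[i] -= learning_rate * gradient * input[i];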

    void residual_correlation_output_gradient( Vec MAgradients, const Vec& weights, const Vec& output_gradient, 
                                               real activation, real& hidden_gradient ) const;
    
};

// Declares a few other classes and functions related to this class.
DECLARE_OBJECT_PTR(IncrementalNNet);
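
// A hedged usage sketch (the option values and the 'trainset' VMat are
// hypothetical, not taken from any shipped example; PLearn learners are more
// commonly configured from .plearn scripts):
//
//   PP<IncrementalNNet> nnet = new IncrementalNNet();
//   nnet->setOption("n_outputs", "1");
//   nnet->setOption("output_cost_type", "squared_error");
//   nnet->setOption("initial_learning_rate", "0.01");
//   nnet->setOption("online", "1");
//   nnet->build();
//   nnet->setTrainingSet(trainset); // trainset: any VMat of (input, target) pairs
//   nnet->train();                  // trains until stage == nstages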
  
} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :