// -*- C++ -*-

// NeighborhoodSmoothnessNNet.h
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: NeighborhoodSmoothnessNNet.h 9419 2008-09-02 16:48:08Z nouiz $
 ******************************************************* */

#ifndef NeighborhoodSmoothnessNNet_INC
#define NeighborhoodSmoothnessNNet_INC

#include "PLearner.h"
#include <plearn/opt/Optimizer.h>
//#include "Var_all.h"

namespace PLearn {
using namespace std;

class NeighborhoodSmoothnessNNet: public PLearner
{

protected:

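    // Overview (inferred from the declarations below): each instance of a bag
    // is mapped through up to two hidden layers (w1, w2, plus an optional
    // direct connection wdirect) to last_hidden and then to output;
    // SumOverBagsVariable aggregates the per-instance results into bag_output,
    // and the smoothness penalty acts on the hidden representations of the
    // bag (see p_ij, sigma_hidden and sne_weight).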
    Var input;  // Var(inputsize())
    Var target; // Var(targetsize()-weightsize())
    Var sampleweight; // Var(1) if train_set->hasWeights()
    Var w1; // bias and weights of first hidden layer
    Var w2; // bias and weights of second hidden layer
    Var wout; // bias and weights of output layer
    Var wdirect; // bias and weights for direct in-to-out connection
    Var last_hidden; // last hidden layer (the one to smooth)
    Var output; // output (P(y_i|x_i)) for a single bag element
    Var bag_size; // filled up by SumOverBagsVariable
    Var bag_inputs; // filled up by SumOverBagsVariable
    Var bag_output; // P(y=1|bag_inputs)
    Var bag_hidden; // The hidden layers of all inputs in a bag.
    mutable int test_bag_size; // BECAUSE OF UGLY HACK IN computeOutputAndCosts (look at it, it's worth it!)
    Func invars_to_training_cost; // (bag inputs and targets) -> training cost

    VarArray costs; // (negative log-likelihood, classification error) for the bag
    VarArray penalties;
    Var training_cost; // weighted cost + penalties
    Var test_costs; // hconcat(costs)
    VarArray invars;
    VarArray params;  // all parameter input vars
    Vec paramsvalues; // values of all parameters

    Var p_ij;       // The probabilities p_ij on the inputs.

public:

    mutable Func f; // input -> output
    Func f_input_to_hidden; // input -> hidden
    mutable Func test_costf; // input & target -> output & test_costs
    mutable Func output_and_target_to_cost; // output & target -> cost

public:

    typedef PLearner inherited;

    // Build options inherited from the learner:
    // inputsize, outputsize, targetsize, experiment_name, save_at_every_epoch

    // Build options:
    int max_n_instances; // maximum number of instances (input vectors x_i) allowed

    int nhidden;    // number of hidden units in first hidden layer (default:0)
    int nhidden2;   // number of hidden units in second hidden layer (default:0)
    int noutputs;   // number of output units (outputsize)

    real sigma_hidden; // width of the Gaussian kernel used to compare hidden representations
    real sne_weight;   // weight of the SNE-like smoothness penalty in the training cost

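    // A rough sketch of how the neighbor probabilities are presumably computed
    // (an interpretation based on the member names above, with x_i, x_j the
    // inputs of a bag and sigma a kernel bandwidth):
    //
    //   p_ij = exp(-||x_i - x_j||^2 / (2*sigma^2))
    //          / sum_k exp(-||x_i - x_k||^2 / (2*sigma^2))
    //
    // An SNE-style term, weighted by sne_weight, then encourages the matching
    // probabilities computed on last_hidden (with bandwidth sigma_hidden) to
    // stay close to the p_ij computed on the inputs.
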
    real weight_decay; // default: 0
    real bias_decay;   // default: 0
    real layer1_weight_decay; // default: MISSING_VALUE
    real layer1_bias_decay;   // default: MISSING_VALUE
    real layer2_weight_decay; // default: MISSING_VALUE
    real layer2_bias_decay;   // default: MISSING_VALUE
    real output_layer_weight_decay; // default: MISSING_VALUE
    real output_layer_bias_decay;   // default: MISSING_VALUE
    real direct_in_to_out_weight_decay; // default: MISSING_VALUE
    real classification_regularizer; // default: 0

    string penalty_type; // default: "L2_square"
    bool L1_penalty; // default: false - deprecated, set "penalty_type" to "L1"
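    // A plausible reading of penalty_type: with "L2_square" the penalty added
    // to training_cost for a weight w is roughly weight_decay * sum_i w_i^2,
    // while with "L1" it is weight_decay * sum_i |w_i|.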
    bool direct_in_to_out; // should we include direct input-to-output connections? default: false
    string output_transfer_func; // tanh, sigmoid, softplus, softmax  (default: "" means no transfer function)
    real interval_minval, interval_maxval; // if output_transfer_func = interval(minval,maxval), these are the interval bounds

    // The cost functions to use; each one can be one of mse, mse_onehot, NLL,
    // class_error or multiclass_error (no default).
    Array<string> cost_funcs;

    // Build options related to the optimization:
    PP<Optimizer> optimizer; // the optimizer to use (no default)

    int batch_size; // how many samples to use to estimate gradient before an update
                    // 0 means the whole training set (default: 1)

private:

    void build_();

public:

    NeighborhoodSmoothnessNNet();
    virtual ~NeighborhoodSmoothnessNNet();
    PLEARN_DECLARE_OBJECT(NeighborhoodSmoothnessNNet);

    virtual void build();
    virtual void forget(); // simply calls initializeParams()

    virtual int outputsize() const;
    virtual TVec<string> getTrainCostNames() const;
    virtual TVec<string> getTestCostNames() const;

    virtual void train();

    virtual void setTrainingSet(VMat training_set, bool call_forget=true);

    virtual void computeOutput(const Vec& input, Vec& output) const;

    virtual void computeOutputAndCosts(const Vec& input, const Vec& target,
                                       Vec& output, Vec& costs) const;

    virtual void computeCostsFromOutputs(const Vec& input, const Vec& output, 
                                         const Vec& target, Vec& costs) const;

    virtual void makeDeepCopyFromShallowCopy(CopiesMap &copies);

protected:
    static void declareOptions(OptionList& ol);
    void initializeParams();

};

DECLARE_OBJECT_PTR(NeighborhoodSmoothnessNNet);

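// A minimal usage sketch (the option values and the 'trainset' VMat below are
// illustrative assumptions, not taken from this file):
//
//     PP<NeighborhoodSmoothnessNNet> net = new NeighborhoodSmoothnessNNet();
//     net->nhidden = 10;
//     net->noutputs = 1;
//     net->cost_funcs.resize(1);
//     net->cost_funcs[0] = "NLL";
//     net->optimizer = some_optimizer;   // any PP<Optimizer> instance
//     net->batch_size = 1;
//     net->build();
//     net->setTrainingSet(trainset);     // bags encoded as expected by SumOverBagsVariable
//     net->train();
//
//     Vec output(net->outputsize());
//     net->computeOutput(some_input, output);
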
} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :