00001 // -*- C++ -*-
00002 
00003 // NeighborhoodSmoothnessNNet.cc
00004 // Copyright (c) 1998-2002 Pascal Vincent
00005 // Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
00006 // Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
00007 //
00008 // Redistribution and use in source and binary forms, with or without
00009 // modification, are permitted provided that the following conditions are met:
00010 // 
00011 //  1. Redistributions of source code must retain the above copyright
00012 //     notice, this list of conditions and the following disclaimer.
00013 // 
00014 //  2. Redistributions in binary form must reproduce the above copyright
00015 //     notice, this list of conditions and the following disclaimer in the
00016 //     documentation and/or other materials provided with the distribution.
00017 // 
00018 //  3. The name of the authors may not be used to endorse or promote
00019 //     products derived from this software without specific prior written
00020 //     permission.
00021 // 
00022 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00023 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00024 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00025 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00026 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00027 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00028 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00029 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00030 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00031 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00032 // 
00033 // This file is part of the PLearn library. For more information on the PLearn
00034 // library, go to the PLearn Web site at www.plearn.org
00035 
00036 
00037 /* *******************************************************      
00038  * $Id: NeighborhoodSmoothnessNNet.cc 8321 2007-11-28 21:37:09Z nouiz $
00039  ******************************************************* */
00040 
00041 
00042 
00043 #include <plearn/var/AffineTransformVariable.h>
00044 #include <plearn/var/AffineTransformWeightPenalty.h>
00045 #include <plearn/var/BinaryClassificationLossVariable.h>
00046 #include <plearn/var/ClassificationLossVariable.h>
00047 #include <plearn/var/ColumnSumVariable.h>
00048 #include <plearn/var/ConcatColumnsVariable.h>
00049 #include <plearn/vmat/ConcatColumnsVMatrix.h>
00050 #include <plearn/var/CrossEntropyVariable.h>
00051 #include <plearn/var/DotProductVariable.h>
00052 #include <plearn/var/ExpVariable.h>
00053 #include <plearn/var/InvertElementsVariable.h>
00054 #include <plearn/var/LogVariable.h>
00055 #include <plearn/var/LiftOutputVariable.h>
00056 #include <plearn/var/LogSoftmaxVariable.h>
00057 #include <plearn/var/MinusVariable.h>
00058 #include <plearn/var/MulticlassLossVariable.h>
00059 #include <plearn/var/NegateElementsVariable.h>
00060 #include <plearn/var/NegCrossEntropySigmoidVariable.h>
00061 #include "NeighborhoodSmoothnessNNet.h"
00062 #include <plearn/var/OneHotSquaredLoss.h>
00063 #include <plearn/base/ProgressBar.h>
00064 #include <plearn/math/random.h>
00065 #include <plearn/var/SigmoidVariable.h>
00066 #include <plearn/var/SoftmaxVariable.h>
00067 #include <plearn/var/SoftplusVariable.h>
00068 #include <plearn/var/SumVariable.h>
00069 #include <plearn/var/SumAbsVariable.h>
00070 #include <plearn/var/SumOfVariable.h>
00071 #include <plearn/var/SumOverBagsVariable.h>
00072 #include <plearn/var/SumSquareVariable.h>
00073 #include <plearn/var/SubMatVariable.h>
00074 #include <plearn/var/SubMatTransposeVariable.h>
00075 #include <plearn/vmat/SubVMatrix.h>
00076 #include <plearn/var/TanhVariable.h>
00077 #include <plearn/var/TimesVariable.h>
00078 #include <plearn/var/TimesScalarVariable.h>
00079 #include <plearn/var/TransposeProductVariable.h>
00080 #include <plearn/var/UnfoldedFuncVariable.h>
00081 #include <plearn/var/UnfoldedSumOfVariable.h>
00082 #include <plearn/var/Var_operators.h>
00083 #include <plearn/var/Var_utils.h>
00084 
00085 //#include "DisplayUtils.h"
00086 //#include "GradientOptimizer.h"
00087 
00088 namespace PLearn {
00089 using namespace std;
00090 
00091 PLEARN_IMPLEMENT_OBJECT(NeighborhoodSmoothnessNNet, 
00092                         "Feedforward neural network whose hidden units are smoothed according to input neighborhood\n",
00093                         "TODO"
00094     );
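
// Illustrative usage sketch (not part of this file): a NeighborhoodSmoothnessNNet
// would typically be configured from a .plearn script and paired with a training
// VMat that provides the bag structure and the p_ij neighborhood weights (e.g. a
// KNNVMatrix, as hinted by the batch_size comment in declareOptions below).  The
// option values here are assumptions chosen only to show the shape of such a
// specification:
//
//   NeighborhoodSmoothnessNNet(
//       nhidden = 50;
//       noutputs = 2;
//       max_n_instances = 10;
//       sigma_hidden = 0.1;
//       sne_weight = 1;
//       output_transfer_func = "softmax";
//       cost_funcs = [ NLL; class_error ];
//       optimizer = GradientOptimizer(start_learning_rate = 0.01);
//       batch_size = 1;
//   )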
00095 
00096 NeighborhoodSmoothnessNNet::NeighborhoodSmoothnessNNet() // DEFAULT VALUES FOR ALL OPTIONS
00097     :
00098 test_bag_size(0),
00099 max_n_instances(1),
00100 nhidden(0),
00101 nhidden2(0),
00102 noutputs(0),
00103 sigma_hidden(0.1),
00104 sne_weight(0),
00105 weight_decay(0),
00106 bias_decay(0),
00107 layer1_weight_decay(0),
00108 layer1_bias_decay(0),
00109 layer2_weight_decay(0),
00110 layer2_bias_decay(0),
00111 output_layer_weight_decay(0),
00112 output_layer_bias_decay(0),
00113 direct_in_to_out_weight_decay(0),
00114 penalty_type("L2_square"),
00115 L1_penalty(false),
00116 direct_in_to_out(false),
00117 output_transfer_func(""),
00118 interval_minval(0), interval_maxval(1),
00119 batch_size(1)
00120 {}
00121 
00122 NeighborhoodSmoothnessNNet::~NeighborhoodSmoothnessNNet()
00123 {
00124 }
00125 
00126 void NeighborhoodSmoothnessNNet::declareOptions(OptionList& ol)
00127 {
00128     declareOption(ol, "max_n_instances", &NeighborhoodSmoothnessNNet::max_n_instances, OptionBase::buildoption, 
00129                   "    maximum number of instances (input vectors x_i) allowed\n");
00130 
00131     declareOption(ol, "nhidden", &NeighborhoodSmoothnessNNet::nhidden, OptionBase::buildoption, 
00132                   "    number of hidden units in first hidden layer (0 means no hidden layer)\n");
00133 
00134     declareOption(ol, "nhidden2", &NeighborhoodSmoothnessNNet::nhidden2, OptionBase::buildoption, 
00135                   "    number of hidden units in second hidden layer (0 means no hidden layer)\n");
00136 
00137     declareOption(ol, "sne_weight", &NeighborhoodSmoothnessNNet::sne_weight, OptionBase::buildoption, 
00138                   "    The weight of the SNE cost in the total cost optimized.");
00139 
00140     declareOption(ol, "sigma_hidden", &NeighborhoodSmoothnessNNet::sigma_hidden, OptionBase::buildoption, 
00141                   "    The bandwidth of the Gaussian kernel used to compute the similarity\n"
00142                   "    between the hidden-layer representations of the instances in a bag.");
00143 
00144     declareOption(ol, "noutputs", &NeighborhoodSmoothnessNNet::noutputs, OptionBase::buildoption, 
00145                   "    number of output units. This gives this learner its outputsize.\n"
00146                   "    It is typically the same dimensionality as the target for regression problems, \n"
00147                   "    but for classification problems, where the target is just the class number, noutputs is \n"
00148                   "    usually the number of classes (as we want to output a score or probability \n"
00149                   "    vector, one entry per class).\n");
00150 
00151     declareOption(ol, "weight_decay", &NeighborhoodSmoothnessNNet::weight_decay, OptionBase::buildoption, 
00152                   "    global weight decay for all layers\n");
00153 
00154     declareOption(ol, "bias_decay", &NeighborhoodSmoothnessNNet::bias_decay, OptionBase::buildoption, 
00155                   "    global bias decay for all layers\n");
00156 
00157     declareOption(ol, "layer1_weight_decay", &NeighborhoodSmoothnessNNet::layer1_weight_decay, OptionBase::buildoption, 
00158                   "    Additional weight decay for the first hidden layer.  Is added to weight_decay.\n");
00159     declareOption(ol, "layer1_bias_decay", &NeighborhoodSmoothnessNNet::layer1_bias_decay, OptionBase::buildoption, 
00160                   "    Additional bias decay for the first hidden layer.  Is added to bias_decay.\n");
00161 
00162     declareOption(ol, "layer2_weight_decay", &NeighborhoodSmoothnessNNet::layer2_weight_decay, OptionBase::buildoption, 
00163                   "    Additional weight decay for the second hidden layer.  Is added to weight_decay.\n");
00164 
00165     declareOption(ol, "layer2_bias_decay", &NeighborhoodSmoothnessNNet::layer2_bias_decay, OptionBase::buildoption, 
00166                   "    Additional bias decay for the second hidden layer.  Is added to bias_decay.\n");
00167 
00168     declareOption(ol, "output_layer_weight_decay", &NeighborhoodSmoothnessNNet::output_layer_weight_decay, OptionBase::buildoption, 
00169                   "    Additional weight decay for the output layer.  Is added to 'weight_decay'.\n");
00170 
00171     declareOption(ol, "output_layer_bias_decay", &NeighborhoodSmoothnessNNet::output_layer_bias_decay, OptionBase::buildoption, 
00172                   "    Additional bias decay for the output layer.  Is added to 'bias_decay'.\n");
00173 
00174     declareOption(ol, "direct_in_to_out_weight_decay", &NeighborhoodSmoothnessNNet::direct_in_to_out_weight_decay, OptionBase::buildoption, 
00175                   "    Additional weight decay for the direct in-to-out layer.  Is added to 'weight_decay'.\n");
00176 
00177     declareOption(ol, "penalty_type", &NeighborhoodSmoothnessNNet::penalty_type,
00178                   OptionBase::buildoption,
00179                   "    Penalty to use on the weights (for weight and bias decay).\n"
00180                   "    Can be any of:\n"
00181                   "      - \"L1\": L1 norm,\n"
00182                   "      - \"L1_square\": square of the L1 norm,\n"
00183                   "      - \"L2_square\" (default): square of the L2 norm.\n");
00184 
00185     declareOption(ol, "L1_penalty", &NeighborhoodSmoothnessNNet::L1_penalty, OptionBase::buildoption, 
00186                   "    Deprecated - You should use \"penalty_type\" instead\n"
00187                   "    should we use L1 penalty instead of the default L2 penalty on the weights?\n");
00188 
00189     declareOption(ol, "direct_in_to_out", &NeighborhoodSmoothnessNNet::direct_in_to_out, OptionBase::buildoption, 
00190                   "    should we include direct input to output connections?\n");
00191 
00192     declareOption(ol, "output_transfer_func", &NeighborhoodSmoothnessNNet::output_transfer_func, OptionBase::buildoption, 
00193                   "    what transfer function to use for the output layer? \n"
00194                   "    one of: tanh, sigmoid, exp, softplus, softmax \n"
00195                   "    or interval(<minval>,<maxval>), which stands for\n"
00196                   "    <minval>+(<maxval>-<minval>)*sigmoid(.).\n"
00197                   "    An empty string or \"none\" means no output transfer function \n");
00198 
00199     declareOption(ol, "cost_funcs", &NeighborhoodSmoothnessNNet::cost_funcs, OptionBase::buildoption, 
00200                   "    a list of cost functions to use\n"
00201                   "    in the form \"[ cf1; cf2; cf3; ... ]\" where each function is one of: \n"
00202                   "      mse (for regression)\n"
00203                   "      mse_onehot (for classification)\n"
00204                   "      NLL (negative log likelihood -log(p[c]) for classification) \n"
00205                   "      class_error (classification error) \n"
00206                   "      binary_class_error (classification error for a 0-1 binary classifier)\n"
00207                   "      multiclass_error\n"
00208                   "      cross_entropy (for binary classification)\n"
00209                   "      stable_cross_entropy (more accurate backprop and possible regularization, for binary classification)\n"
00210                   "      lift_output (not a real cost function, just the output for lift computation)\n"
00211                   "    The first function of the list will be used as \n"
00212                   "    the objective function to optimize \n"
00213                   "    (possibly with an added weight decay penalty) \n");
00214   
00215     declareOption(ol, "classification_regularizer", &NeighborhoodSmoothnessNNet::classification_regularizer, OptionBase::buildoption, 
00216                   "    used only in the stable_cross_entropy cost function, to fight overfitting (0<=r<1)\n");
00217 
00218     declareOption(ol, "optimizer", &NeighborhoodSmoothnessNNet::optimizer, OptionBase::buildoption, 
00219                   "    specify the optimizer to use\n");
00220 
00221     declareOption(ol, "batch_size", &NeighborhoodSmoothnessNNet::batch_size, OptionBase::buildoption, 
00222                   "    how many samples to use to estimate the average gradient before updating the weights\n"
00223                   "    0 is equivalent to specifying training_set->n_non_missing_rows() \n");
00224     // TODO Not really, since the matrix given typically has many more rows (KNNVMatrix) than input samples.
00225 
00226     declareOption(ol, "paramsvalues", &NeighborhoodSmoothnessNNet::paramsvalues, OptionBase::learntoption, 
00227                   "    The learned parameter vector\n");
00228 
00229     inherited::declareOptions(ol);
00230 
00231 }
00232 
00234 // build //
00236 void NeighborhoodSmoothnessNNet::build()
00237 {
00238     inherited::build();
00239     build_();
00240 }
00241 
00243 // build_ //
00245 void NeighborhoodSmoothnessNNet::build_()
00246 {
00247     /*
00248      * Create Topology Var Graph
00249      */
00250 
00251     // Don't do anything if we don't have a train_set
00252     // It's the only one who knows the inputsize and targetsize anyway...
00253 
00254     if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
00255     {
00256 
00257         // init. basic vars
00258         int true_inputsize = inputsize(); // inputsize is now true inputsize 
00259         bag_inputs = Var(max_n_instances, inputsize() + 1);
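        // Layout of bag_inputs: one row per instance of the current bag (up to
        // max_n_instances rows).  The first true_inputsize columns of a row hold
        // that instance's raw input vector, and the extra last column carries its
        // neighborhood weight p_ij (read back below, for rows 1 and up, as the
        // p_ij variable used in the SNE cost).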
00260         // The input (with its p_ij) is the first row of the bag inputs.
00261         Var input_and_pij = subMat(bag_inputs, 0, 0, 1, bag_inputs->width());
00262         input = new SubMatTransposeVariable(input_and_pij, 0, 0, 1, true_inputsize);
00263         output = input;
00264         params.resize(0);
00265 
00266         // first hidden layer
00267         if(nhidden>0)
00268         {
00269             w1 = Var(1 + true_inputsize, nhidden, "w1");      
00270             output = tanh(affine_transform(output,w1));
00271             params.append(w1);
00272             last_hidden = output;
00273         }
00274 
00275         // second hidden layer
00276         if(nhidden2>0)
00277         {
00278             w2 = Var(1+nhidden, nhidden2, "w2");
00279             output = tanh(affine_transform(output,w2));
00280             params.append(w2);
00281             last_hidden = output;
00282         }
00283 
00284         if (nhidden==0)
00285             PLERROR("NeighborhoodSmoothnessNNet:: there must be hidden units!",nhidden2);
00286       
00287 
00288         // output layer before transfer function
00289 
00290         wout = Var(1+output->size(), outputsize(), "wout");
00291         output = affine_transform(output,wout);
00292         params.append(wout);
00293 
00294         // direct in-to-out layer
00295         if(direct_in_to_out)
00296         {
00297             wdirect = Var(true_inputsize, outputsize(), "wdirect");
00298             output += transposeProduct(wdirect, input);
00299             params.append(wdirect);
00300         }
00301 
00302         Var before_transfer_func = output;
00303    
00304         /*
00305          * output_transfer_func
00306          */
00307         string::size_type p = 0;
00308         if(output_transfer_func!="" && output_transfer_func!="none")
00309         {
00310             if(output_transfer_func=="tanh")
00311                 output = tanh(output);
00312             else if(output_transfer_func=="sigmoid")
00313                 output = sigmoid(output);
00314             else if(output_transfer_func=="softplus")
00315                 output = softplus(output);
00316             else if(output_transfer_func=="exp")
00317                 output = exp(output);
00318             else if(output_transfer_func=="softmax")
00319                 output = softmax(output);
00320             else if (output_transfer_func == "log_softmax")
00321                 output = log_softmax(output);
00322             else if ((p=output_transfer_func.find("interval"))!=string::npos)
00323             {
00324                 unsigned int q = output_transfer_func.find(",");
00325                 interval_minval = atof(output_transfer_func.substr(p+1,q-(p+1)).c_str());
00326                 unsigned int r = output_transfer_func.find(")");
00327                 interval_maxval = atof(output_transfer_func.substr(q+1,r-(q+1)).c_str());
00328                 output = interval_minval + (interval_maxval - interval_minval)*sigmoid(output);
00329             }
00330             else
00331                 PLERROR("In NNet::build_()  unknown output_transfer_func option: %s",output_transfer_func.c_str());
00332         }
00333 
00334         /*
00335          * target and weights
00336          */
00337       
00338         target = Var(targetsize()-1, "target");
00339       
00340         if(weightsize_>0)
00341         {
00342             if (weightsize_!=1)
00343                 PLERROR("NeighborhoodSmoothnessNNet: expected weightsize to be 1 or 0 (or unspecified = -1, meaning 0), got %d",weightsize_);
00344             sampleweight = Var(1, "weight");
00345         }
00346 
00347         // checking penalty
00348         if( L1_penalty )
00349         {
00350             PLDEPRECATED("Option \"L1_penalty\" deprecated. Please use \"penalty_type = L1\" instead.");
00351             L1_penalty = 0;
00352             penalty_type = "L1";
00353         }
00354 
00355         string pt = lowerstring( penalty_type );
00356         if( pt == "l1" )
00357             penalty_type = "L1";
00358         else if( pt == "l1_square" || pt == "l1 square" || pt == "l1square" )
00359             penalty_type = "L1_square";
00360         else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
00361             penalty_type = "L2_square";
00362         else if( pt == "l2" )
00363         {
00364             PLWARNING("L2 penalty not supported, assuming you want L2 square");
00365             penalty_type = "L2_square";
00366         }
00367         else
00368             PLERROR("penalty_type \"%s\" not supported", penalty_type.c_str());
00369 
00370         // create penalties
00371         penalties.resize(0);  // prevents penalties from being added twice by consecutive builds
00372         if(w1 && ((layer1_weight_decay + weight_decay)!=0 || (layer1_bias_decay + bias_decay)!=0))
00373             penalties.append(affine_transform_weight_penalty(w1, (layer1_weight_decay + weight_decay), (layer1_bias_decay + bias_decay), penalty_type));
00374         if(w2 && ((layer2_weight_decay + weight_decay)!=0 || (layer2_bias_decay + bias_decay)!=0))
00375             penalties.append(affine_transform_weight_penalty(w2, (layer2_weight_decay + weight_decay), (layer2_bias_decay + bias_decay), penalty_type));
00376         if(wout && ((output_layer_weight_decay + weight_decay)!=0 || (output_layer_bias_decay + bias_decay)!=0))
00377             penalties.append(affine_transform_weight_penalty(wout, (output_layer_weight_decay + weight_decay), 
00378                                                              (output_layer_bias_decay + bias_decay), penalty_type));
00379         if(wdirect && (direct_in_to_out_weight_decay + weight_decay) != 0)
00380         {
00381             if (penalty_type == "L1_square")
00382                 penalties.append(square(sumabs(wdirect))*(direct_in_to_out_weight_decay + weight_decay));
00383             else if (penalty_type == "L1")
00384                 penalties.append(sumabs(wdirect)*(direct_in_to_out_weight_decay + weight_decay));
00385             else if (penalty_type == "L2_square")
00386                 penalties.append(sumsquare(wdirect)*(direct_in_to_out_weight_decay + weight_decay));
00387         }
00388 
00389         // Shared values hack...
00390         if(paramsvalues && (paramsvalues.size() == params.nelems()))
00391             params << paramsvalues;
00392         else
00393         {
00394             paramsvalues.resize(params.nelems());
00395             initializeParams();
00396         }
00397         params.makeSharedValue(paramsvalues);
00398 
00399         output->setName("element output");
00400 
00401         f = Func(input, output);
00402         f_input_to_hidden = Func(input, last_hidden);
00403 
00404         /*
00405          * costfuncs
00406          */
00407 
00408         bag_size = Var(1,1);
00409         bag_hidden = unfoldedFunc(subMat(bag_inputs, 0, 0, bag_inputs.length(), true_inputsize), f_input_to_hidden, false);
00410         p_ij = subMat(bag_inputs, 1, true_inputsize, bag_inputs->length() - 1, 1);
00411 
00412         // The q_ij function.
00413         Var hidden_0 = new SubMatTransposeVariable(bag_hidden, 0, 0, 1, bag_hidden->width());
00414         Var store_hidden(last_hidden.length(), last_hidden.width());
00415         Var hidden_0_minus_hidden = minus(hidden_0, store_hidden);
00416         Var k_hidden =
00417             exp(
00418                 timesScalar(
00419                     dot(hidden_0_minus_hidden, hidden_0_minus_hidden),
00420                     var(- 1 / (sigma_hidden * sigma_hidden))
00421                     )
00422                 );
00423         Func f_hidden_to_k_hidden(store_hidden, k_hidden);
00424         Var k_hidden_all =
00425             unfoldedFunc(
00426                 subMat(
00427                     bag_hidden, 1, 0, bag_hidden->length() - 1, bag_hidden->width()
00428                     ),
00429                 f_hidden_to_k_hidden,
00430                 false
00431                 );
00432         Var one_over_sum_of_k_hidden = invertElements(sum(k_hidden_all));
00433         Var log_q_ij = log(timesScalar(k_hidden_all, one_over_sum_of_k_hidden));
00434         Var minus_weight_sum_p_ij_log_q_ij =
00435             timesScalar(sum(times(p_ij, log_q_ij)), var(-sne_weight));
00436 
00437         int ncosts = cost_funcs.size();  
00438         if(ncosts<=0)
00439             PLERROR("In NNet::build_()  Empty cost_funcs : must at least specify the cost function to optimize!");
00440         costs.resize(ncosts);
00441       
00442         for(int k=0; k<ncosts; k++)
00443         {
00444             // create costfuncs and apply individual weights if weightpart > 1
00445             if(cost_funcs[k]=="mse")
00446                 costs[k]= sumsquare(output-target);
00447             else if(cost_funcs[k]=="mse_onehot")
00448                 costs[k] = onehot_squared_loss(output, target);
00449             else if(cost_funcs[k]=="NLL") 
00450             {
00451                 if (output->size() == 1) {
00452                     // Assume sigmoid output here!
00453                     costs[k] = cross_entropy(output, target);
00454                 } else {
00455                     if (output_transfer_func == "log_softmax")
00456                         costs[k] = -output[target];
00457                     else
00458                         costs[k] = neg_log_pi(output, target);
00459                 }
00460             } 
00461             else if(cost_funcs[k]=="class_error")
00462                 costs[k] = classification_loss(output, target);
00463             else if(cost_funcs[k]=="binary_class_error")
00464                 costs[k] = binary_classification_loss(output, target);
00465             else if(cost_funcs[k]=="multiclass_error")
00466                 costs[k] = multiclass_loss(output, target);
00467             else if(cost_funcs[k]=="cross_entropy")
00468                 costs[k] = cross_entropy(output, target);
00469             else if (cost_funcs[k]=="stable_cross_entropy") {
00470                 Var c = stable_cross_entropy(before_transfer_func, target);
00471                 costs[k] = c;
00472                 if (classification_regularizer) {
00473                     // There is a regularizer to add to the cost function.
00474                     dynamic_cast<NegCrossEntropySigmoidVariable*>((Variable*) c)->
00475                         setRegularizer(classification_regularizer);
00476                 }
00477             }
00478             else if (cost_funcs[k]=="lift_output")
00479                 costs[k] = lift_output(output, target);
00480             else  // Assume we got a Variable name and its options
00481             {
00482                 costs[k]= dynamic_cast<Variable*>(newObject(cost_funcs[k]));
00483                 if(costs[k].isNull())
00484                     PLERROR("In NNet::build_()  unknown cost_func option: %s",cost_funcs[k].c_str());
00485                 costs[k]->setParents(output & target);
00486                 costs[k]->build();
00487             }
00488           
00489             // take into account the sampleweight
00490             //if(sampleweight)
00491             //  costs[k]= costs[k] * sampleweight; // NO, because this is taken into account (more properly) in stats->update
00492         }
00493 
00494         test_costs = hconcat(costs);
00495 
00496         // Apply penalty to cost.
00497         // If there is no penalty, we still add costs[0] as the first cost, in
00498         // order to keep the same number of costs as if there was a penalty.
00499         Var test_costs_final = test_costs;
00500         Var first_cost_final = costs[0];
00501         if (penalties.size() != 0) {
00502             first_cost_final = sum(hconcat(first_cost_final & penalties));
00503         }
00504         if (weightsize_ > 0) {
00505             test_costs_final = sampleweight * test_costs;
00506             first_cost_final = sampleweight * first_cost_final;
00507         }
00508         // We add the SNE cost.
00509         // TODO Make sure we optimize the training cost.
00510         // TODO Actually maybe we should put this before multiplying by sampleweight.
00511         first_cost_final = first_cost_final + minus_weight_sum_p_ij_log_q_ij;
00512       
00513         training_cost = hconcat(first_cost_final & test_costs_final);
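        // training_cost therefore contains, in order:
        //   [ cost_funcs[0] + penalties (times sampleweight if weightsize_ > 0) + SNE term,
        //     cost_funcs[0], cost_funcs[1], ... (times sampleweight if weightsize_ > 0) ]
        // which matches the names returned by getTrainCostNames().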
00514 
00515 /*      if(penalties.size() != 0) {
00516         if (weightsize_>0)
00517         // only multiply by sampleweight if there are weights
00518         training_cost = hconcat(sampleweight*sum(hconcat(costs[0] & penalties))
00519         & (test_costs*sampleweight));
00520         else {
00521         training_cost = hconcat(sum(hconcat(costs[0] & penalties)) & test_costs);
00522         }
00523         } 
00524         else {
00525         if(weightsize_>0) {
00526         // only multiply by sampleweight if there are weights
00527         training_cost = hconcat(costs[0]*sampleweight & test_costs*sampleweight);
00528         } else {
00529         training_cost = hconcat(costs[0] & test_costs);
00530         }
00531         } */
00532 
00533         training_cost->setName("training_cost");
00534         test_costs->setName("test_costs");
00535 
00536         if (weightsize_ > 0) {
00537             invars = bag_inputs & bag_size & target & sampleweight;
00538         } else {
00539             invars = bag_inputs & bag_size & target;
00540         }
00541         invars_to_training_cost = Func(invars, training_cost);
00542 
00543         invars_to_training_cost->recomputeParents();
00544 
00545         // Other funcs.
00546         VarArray outvars;
00547         VarArray testinvars;
00548         testinvars.push_back(input);
00549         outvars.push_back(output);
00550         testinvars.push_back(target);
00551         outvars.push_back(target);
00552 
00553         test_costf = Func(testinvars, output&test_costs);
00554         test_costf->recomputeParents();
00555         output_and_target_to_cost = Func(outvars, test_costs);
00556         output_and_target_to_cost->recomputeParents();
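        // test_costf maps (input, target) to (output, test_costs) and is used by
        // computeOutputAndCosts(); output_and_target_to_cost maps (output, target)
        // to test_costs and is used by computeCostsFromOutputs().  Note that at
        // test time the learner is applied to a single input vector rather than
        // to a whole bag.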
00557 
00558     }
00559 }
00560 
00562 // outputsize //
00564 int NeighborhoodSmoothnessNNet::outputsize() const
00565 { return noutputs; }
00566 
00568 // getTrainCostNames //
00570 TVec<string> NeighborhoodSmoothnessNNet::getTrainCostNames() const
00571 {
00572     return (cost_funcs[0]+"+penalty+SNE") & cost_funcs;
00573 }
00574 
00576 // getTestCostNames //
00578 TVec<string> NeighborhoodSmoothnessNNet::getTestCostNames() const
00579 { 
00580     return cost_funcs;
00581 }
00582 
00583 void NeighborhoodSmoothnessNNet::setTrainingSet(VMat training_set, bool call_forget)
00584 { 
00585     // YB: I am not sure a build is really needed when only the LENGTH of the train_set has changed;
00586     // non-parametric methods that use the length should do their "resize" in train(), not in build().
00587     bool training_set_has_changed =
00588         !train_set
00589         || train_set->width()      != training_set->width()
00590         || train_set->length()     != training_set->length()
00591         || train_set->inputsize()  != training_set->inputsize()
00592         || train_set->weightsize() != training_set->weightsize()
00593         || train_set->targetsize() != training_set->targetsize();
00594     train_set = training_set;
00595 
00596     if (training_set_has_changed && inputsize_<0)
00597     {
00598         inputsize_ = train_set->inputsize()-1;
00599         targetsize_ = train_set->targetsize();
00600         weightsize_ = train_set->weightsize();
00601     } else if (train_set->inputsize() != training_set->inputsize()) {
00602         PLERROR("In NeighborhoodSmoothnessNNet::setTrainingSet - You can't change the inputsize of the training set");
00603     }
00604     if (training_set_has_changed || call_forget)
00605         build(); // Change by Yoshua: otherwise, after a setTrainingSet, the build of the NNet is not complete.
00606     if (call_forget)
00607         forget();
00608 }
00609 
00611 // train //
00613 void NeighborhoodSmoothnessNNet::train()
00614 {
00615     // For NeighborhoodSmoothnessNNet, nstages is the number of epochs (complete passes through the training set),
00616     // while the optimizer's nstages is the number of weight updates.
00617     // So the relationship between the two depends on whether we are in stochastic, batch or minibatch mode.
00618 
00619     if(!train_set)
00620         PLERROR("In NeighborhoodSmoothnessNNet::train, you did not setTrainingSet");
00621     
00622     if(!train_stats)
00623         PLERROR("In NeighborhoodSmoothnessNNet::train, you did not setTrainStatsCollector");
00624 
00625     if(f.isNull()) // Net has not been properly built yet (because build was called before the learner had a proper training set)
00626         build();
00627 
00628     int n_bags = -1;
00629     // We must count the nb of bags in the training set.
00630     {
00631         n_bags=0;
00632         int l = train_set->length();
00633         PP<ProgressBar> pb;
00634         if(report_progress)
00635             pb = new ProgressBar("Counting nb bags in train_set for NeighborhoodSmoothnessNNet", l);
00636         Vec row(train_set->width());
00637         int tag_column = train_set->inputsize() + train_set->targetsize() - 1;
00638         for (int i=0;i<l;i++) {
00639             train_set->getRow(i,row);
00640             if (int(row[tag_column]) & SumOverBagsVariable::TARGET_COLUMN_FIRST) {
00641                 // Indicates the beginning of a new bag.
00642                 n_bags++;
00643             }
00644             if(pb)
00645                 pb->update(i);
00646         }
00647     }
00648 
00649     int true_batch_size = batch_size;
00650     if (true_batch_size <= 0) {
00651         // The real batch size is actually the number of bags in the training set.
00652         true_batch_size = n_bags;
00653     }
00654 
00655     // We can now compute the total cost.
00656     Var totalcost = sumOverBags(train_set, invars_to_training_cost, max_n_instances, true_batch_size, true);
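    // totalcost applies invars_to_training_cost to every bag of the training
    // set: each bag is a group of at most max_n_instances consecutive rows
    // whose boundaries are flagged in the last target column (cf. the
    // SumOverBagsVariable::TARGET_COLUMN_FIRST test above), and
    // true_batch_size bags are accumulated for each optimizer update.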
00657 
00658     // Number of optimizer stages corresponding to one learner stage (one epoch).
00659     int optstage_per_lstage = 0;
00660     if (batch_size<=0) {
00661         optstage_per_lstage = 1;
00662     } else {
00663         optstage_per_lstage = n_bags/batch_size;
00664     }
00665 
00666     if(optimizer) {
00667         optimizer->setToOptimize(params, totalcost);  
00668         optimizer->build();
00669     }
00670 
00671     PP<ProgressBar> pb;
00672     if(report_progress)
00673         pb = new ProgressBar("Training NeighborhoodSmoothnessNNet from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
00674 
00675     int initial_stage = stage;
00676     bool early_stop=false;
00677     while(stage<nstages && !early_stop)
00678     {
00679         optimizer->nstages = optstage_per_lstage;
00680         train_stats->forget();
00681         optimizer->early_stop = false;
00682         optimizer->optimizeN(*train_stats);
00683         train_stats->finalize();
00684         if(verbosity>2)
00685             cout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
00686         ++stage;
00687         if(pb)
00688             pb->update(stage-initial_stage);
00689     }
00690     if(verbosity>1)
00691         cout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;
00692 
00693     // TODO Not sure if this is needed, but just in case...
00694     output_and_target_to_cost->recomputeParents();
00695     test_costf->recomputeParents();
00696 
00697 }
00698 
00700 // computeOutput //
00702 void NeighborhoodSmoothnessNNet::computeOutput(
00703     const Vec& inputv, Vec& outputv) const
00704 {
00705     f->fprop(inputv,outputv);
00706 }
00707 
00709 // computeOutputAndCosts //
00711 void NeighborhoodSmoothnessNNet::computeOutputAndCosts(
00712     const Vec& inputv, const Vec& targetv, Vec& outputv, Vec& costsv) const
00713 {
00714     test_costf->fprop(inputv&targetv, outputv&costsv);
00715 }
00716 
00718 // computeCostsFromOutputs //
00720 void NeighborhoodSmoothnessNNet::computeCostsFromOutputs(
00721     const Vec& inputv, const Vec& outputv, const Vec& targetv, Vec& costsv) const
00722 {
00723     output_and_target_to_cost->fprop(outputv&targetv, costsv); 
00724 }
00725 
00727 // initializeParams //
00729 void NeighborhoodSmoothnessNNet::initializeParams()
00730 {
00731     if (seed_>=0)
00732         manual_seed(seed_);
00733     else
00734         PLearn::seed();
00735 
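    // Initialization scheme used below: each weight matrix is filled with
    // values drawn from a normal distribution of mean 0 and standard
    // deviation 1/fan_in (1/inputsize for w1, then 1/nhidden and 1/nhidden2
    // for the following layers), and the first row of each matrix -- the bias
    // row of the affine transform -- is reset to zero.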
00736     real delta = 1. / inputsize();
00737 
00738     /*
00739       if(direct_in_to_out)
00740       {
00741       //fill_random_uniform(wdirect->value, -delta, +delta);
00742       fill_random_normal(wdirect->value, 0, delta);
00743       //wdirect->matValue(0).clear();
00744       }
00745     */
00746     if(nhidden>0)
00747     {
00748         //fill_random_uniform(w1->value, -delta, +delta);
00749         //delta = 1./sqrt(nhidden);
00750         fill_random_normal(w1->value, 0, delta);
00751         if(direct_in_to_out)
00752         {
00753             //fill_random_uniform(wdirect->value, -delta, +delta);
00754             fill_random_normal(wdirect->value, 0, 0.01*delta);
00755             wdirect->matValue(0).clear();
00756         }
00757         delta = 1./nhidden;
00758         w1->matValue(0).clear();
00759     }
00760     if(nhidden2>0)
00761     {
00762         //fill_random_uniform(w2->value, -delta, +delta);
00763         //delta = 1./sqrt(nhidden2);
00764         fill_random_normal(w2->value, 0, delta);
00765         delta = 1./nhidden2;
00766         w2->matValue(0).clear();
00767     }
00768     //fill_random_uniform(wout->value, -delta, +delta);
00769     fill_random_normal(wout->value, 0, delta);
00770     wout->matValue(0).clear();
00771 
00772     // Reset optimizer
00773     if(optimizer)
00774         optimizer->reset();
00775 }
00776 
00778 // forget //
00780 void NeighborhoodSmoothnessNNet::forget()
00781 {
00782     if (train_set) initializeParams();
00783     stage = 0;
00784 }
00785 
00787 // makeDeepCopyFromShallowCopy //
00789 void NeighborhoodSmoothnessNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00790 {
00791     inherited::makeDeepCopyFromShallowCopy(copies);
00792     deepCopyField(input, copies);
00793     deepCopyField(target, copies);
00794     deepCopyField(sampleweight, copies);
00795     deepCopyField(w1, copies);
00796     deepCopyField(w2, copies);
00797     deepCopyField(wout, copies);
00798     deepCopyField(wdirect, copies);
00799     deepCopyField(last_hidden, copies);
00800     deepCopyField(output, copies);
00801     deepCopyField(bag_size, copies);
00802     deepCopyField(bag_inputs, copies);
00803     deepCopyField(bag_output, copies);
00804     deepCopyField(bag_hidden, copies);
00805     deepCopyField(invars_to_training_cost, copies);
00806 
00807     deepCopyField(costs, copies);
00808     deepCopyField(penalties, copies);
00809     deepCopyField(training_cost, copies);
00810     deepCopyField(test_costs, copies);
00811     deepCopyField(invars, copies);
00812     deepCopyField(params, copies);
00813     deepCopyField(paramsvalues, copies);
00814 
00815     deepCopyField(p_ij, copies);
00816 
00817     deepCopyField(f, copies);
00818     deepCopyField(f_input_to_hidden, copies);
00819     deepCopyField(test_costf, copies);
00820     deepCopyField(output_and_target_to_cost, copies);
00821   
00822     deepCopyField(cost_funcs, copies);
00823 
00824     deepCopyField(optimizer, copies);
00825 }
00826 
00827 } // end of namespace PLearn
00828 
00829 
00830 /*
00831   Local Variables:
00832   mode:c++
00833   c-basic-offset:4
00834   c-file-style:"stroustrup"
00835   c-file-offsets:((innamespace . 0)(inline-open . 0))
00836   indent-tabs-mode:nil
00837   fill-column:79
00838   End:
00839 */
00840 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :