PLearn 0.1
DeepFeatureExtractorNNet.cc
00001 // -*- C++ -*-
00002 
00003 // DeepFeatureExtractorNNet.cc
00004 //
00005 // Copyright (C) 2006 Hugo Larochelle 
00006 // 
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 // 
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 // 
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 // 
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 // 
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 // 
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 /* *******************************************************      
00036    * $Id: .pyskeleton_header 544 2003-09-01 00:05:31Z plearner $ 
00037    ******************************************************* */
00038 
00039 // Authors: Hugo Larochelle
00040 
00044 #include "DeepFeatureExtractorNNet.h"
00045 #include <plearn/var/AffineTransformVariable.h>
00046 #include <plearn/var/SourceVariable.h>
00047 #include <plearn/var/AffineTransformWeightPenalty.h>
00048 #include <plearn/var/BiasWeightAffineTransformVariable.h>
00049 #include <plearn/var/BinaryClassificationLossVariable.h>
00050 #include <plearn/var/ClassificationLossVariable.h>
00051 #include <plearn/var/ConcatColumnsVariable.h>
00052 #include <plearn/var/ConcatRowsVariable.h>
00053 #include <plearn/var/CrossEntropyVariable.h>
00054 #include <plearn/var/ExpVariable.h>
00055 #include <plearn/var/LiftOutputVariable.h>
00056 #include <plearn/var/LogSoftmaxVariable.h>
00057 #include <plearn/var/MarginPerceptronCostVariable.h>
00058 #include <plearn/var/MulticlassLossVariable.h>
00059 #include <plearn/var/NegCrossEntropySigmoidVariable.h>
00060 #include "NLLNeighborhoodWeightsVariable.h"
00061 #include <plearn/var/OneHotSquaredLoss.h>
00062 #include <plearn/var/PowVariable.h>
00063 #include <plearn/var/SigmoidVariable.h>
00064 #include <plearn/var/SoftmaxVariable.h>
00065 #include <plearn/var/SoftplusVariable.h>
00066 #include <plearn/var/SubMatVariable.h>
00067 #include <plearn/var/SumVariable.h>
00068 #include <plearn/var/SumAbsVariable.h>
00069 #include <plearn/var/SumOfVariable.h>
00070 #include <plearn/var/SumSquareVariable.h>
00071 #include <plearn/var/TanhVariable.h>
00072 #include <plearn/var/TransposeVariable.h>
00073 #include <plearn/var/TransposeProductVariable.h>
00074 #include <plearn/var/UnaryHardSlopeVariable.h>
00075 #include <plearn/var/Var_operators.h>
00076 #include <plearn/var/Var_utils.h>
00077 #include <plearn/display/DisplayUtils.h>
00078 
00079 #include <plearn/vmat/ConcatColumnsVMatrix.h>
00080 #include <plearn/vmat/GetInputVMatrix.h>
00081 #include <plearn/math/random.h>
00082 
00083 namespace PLearn {
00084 using namespace std;
00085 
00086 PLEARN_IMPLEMENT_OBJECT(
00087     DeepFeatureExtractorNNet,
00088     "Deep Neural Network that extracts features in a greedy, mostly unsupervised way",
00089     "After the greedy unsupervised phase, this learner can optionally be \n"
00090     "trained using a supervised learning criterion (e.g. MSE, class NLL, \n"
00091     "margin-perceptron cost, etc.).");
00092 
00093 DeepFeatureExtractorNNet::DeepFeatureExtractorNNet() 
00094     : batch_size(1),
00095       batch_size_supervised(1), 
00096       output_transfer_func("softmax"),
00097       nhidden_schedule_position(0),
00098       weight_decay(0), 
00099       bias_decay(0),
00100       penalty_type("L2_square"),
00101       classification_regularizer(0),
00102       regularizer(0),
00103       margin(1),
00104       initialization_method("uniform_linear"), 
00105       noutputs(0),
00106       use_same_input_and_output_weights(false),
00107       always_reconstruct_input(false),
00108       use_activations_with_cubed_input(false),
00109       use_n_first_as_supervised(-1),
00110       use_only_supervised_part(false),
00111       relative_minimum_improvement(-1),
00112       input_reconstruction_error("cross_entropy"),
00113       autoassociator_regularisation_weight(0),
00114       supervised_signal_weight(0),
00115       k_nearest_neighbors_reconstruction(-1),
00116       nhidden_schedule_current_position(-1)
00117 {
00118     random_gen = new PRandom();
00119 }
00120 
00121 void DeepFeatureExtractorNNet::declareOptions(OptionList& ol)
00122 {
00123     declareOption(ol, "nhidden_schedule", 
00124                   &DeepFeatureExtractorNNet::nhidden_schedule, 
00125                   OptionBase::buildoption,
00126                   "Number of hidden units of each hidden layer to add");
00127     
00128     declareOption(ol, "optimizer", &DeepFeatureExtractorNNet::optimizer, 
00129                   OptionBase::buildoption,
00130                   "Optimizer of the neural network");
00131 
00132     declareOption(ol, "optimizer_supervised", 
00133                   &DeepFeatureExtractorNNet::optimizer_supervised, 
00134                   OptionBase::buildoption,
00135                   "Optimizer of the supervised phase of the neural network.\n"
00136                   "If not specified, then the same optimizer will always be\n"
00137                   "used.\n");
00138 
00139     declareOption(ol, "batch_size", &DeepFeatureExtractorNNet::batch_size, 
00140                   OptionBase::buildoption, 
00141                   "How many samples to use to estimate the average gradient\n"
00142                   "before updating the weights\n"
00143                   "0 is equivalent to specifying training_set->length() \n");
00144     
00145     declareOption(ol, "batch_size_supervised", &DeepFeatureExtractorNNet::batch_size_supervised, 
00146                   OptionBase::buildoption, 
00147                   "How many samples to use to estimate the average gradient\n"
00148                   "before updating the weights, for the supervised phase.\n"
00149                   "0 is equivalent to specifying training_set->length() \n");
00150     
00151     declareOption(ol, "output_transfer_func", 
00152                   &DeepFeatureExtractorNNet::output_transfer_func, 
00153                   OptionBase::buildoption,
00154                   "Output transfer function, when all hidden layers are \n"
00155                   "added. Choose among:\n"
00156                   "  - \"tanh\" \n"
00157                   "  - \"sigmoid\" \n"
00158                   "  - \"exp\" \n"
00159                   "  - \"softplus\" \n"
00160                   "  - \"softmax\" \n"
00161                   "  - \"log_softmax\" \n"
00162                   "  - \"interval(<minval>,<maxval>)\", which stands for\n"
00163                   "          <minval>+(<maxval>-<minval>)*sigmoid(.).\n"
00164                   "An empty string or \"none\" means no output \n"
00165                   "transfer function \n");
00166     
00167     declareOption(ol, "nhidden_schedule_position", 
00168                   &DeepFeatureExtractorNNet::nhidden_schedule_position, 
00169                   OptionBase::buildoption,
00170                   "Index of the layer(s) that will be trained at the next\n"
00171                   "call of train. Should be bigger than the last\n"
00172                   "nhidden_schedule_position, which is initially -1. \n"
00173                   "Then, all the layers up to nhidden_schedule_position that\n"
00174                   "were not trained so far will be. Also, when\n"
00175                   "nhidden_schedule_position is greater than or equal\n"
00176                   "to the size of nhidden_schedule, then the output layer is also\n"
00177                   "added.");
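          // For example, with nhidden_schedule = [ 200 100 ], successive calls
          // to train() with nhidden_schedule_position set to 0, 1, 2 and 3
          // train, respectively, the first hidden layer, the second hidden
          // layer, the output layer, and finally the whole network (fine-tuning).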
00178     
00179     declareOption(ol, "nhidden_schedule_current_position", 
00180                   &DeepFeatureExtractorNNet::nhidden_schedule_current_position, 
00181                   OptionBase::learntoption,
00182                   "Index of the layer that is being trained at the current state");
00183 
00184     declareOption(ol, "cost_funcs", &DeepFeatureExtractorNNet::cost_funcs, 
00185                   OptionBase::buildoption, 
00186                   "A list of cost functions to use\n"
00187                   "in the form \"[ cf1; cf2; cf3; ... ]\"\n"
00188                   "where each function is one of: \n"
00189                   "  - \"mse\" (for regression)\n"
00190                   "  - \"mse_onehot\" (for classification)\n"
00191                   "  - \"NLL\" (negative log likelihood -log(p[c])\n"
00192                   "             for classification) \n"
00193                   "  - \"class_error\" (classification error) \n"
00194                   "  - \"binary_class_error\" (classification error for a\n"
00195                   "                            0-1 binary classifier)\n"
00196                   "  - \"multiclass_error\" \n"
00197                   "  - \"cross_entropy\" (for binary classification)\n"
00198                   "  - \"stable_cross_entropy\" (more accurate backprop and\n"
00199                   "                              possible regularization, for\n"
00200                   "                              binary classification)\n"
00201                   "  - \"margin_perceptron_cost\" (a hard version of the \n"
00202                   "                                cross_entropy, uses the\n"
00203                   "                                'margin' option)\n"
00204                   "  - \"lift_output\" (not a real cost function, just the\n"
00205                   "                     output for lift computation)\n"
00206                   "The FIRST function of the list will be used as \n"
00207                   "the objective function to optimize \n"
00208                   "(possibly with an added weight decay penalty) \n");
00209     
00210     declareOption(ol, "weight_decay", 
00211                   &DeepFeatureExtractorNNet::weight_decay, OptionBase::buildoption, 
00212                   "Global weight decay for all layers\n");
00213 
00214     declareOption(ol, "bias_decay", &DeepFeatureExtractorNNet::bias_decay, 
00215                   OptionBase::buildoption, 
00216                   "Global bias decay for all layers\n");
00217     
00218     declareOption(ol, "penalty_type", &DeepFeatureExtractorNNet::penalty_type,
00219                   OptionBase::buildoption,
00220                   "Penalty to use on the weights (for weight and bias decay).\n"
00221                   "Can be any of:\n"
00222                   "  - \"L1\": L1 norm,\n"
00223                   //"  - \"L1_square\": square of the L1 norm,\n"
00224                   "  - \"L2_square\" (default): square of the L2 norm.\n");
00225     
00226     declareOption(ol, "classification_regularizer", 
00227                   &DeepFeatureExtractorNNet::classification_regularizer, 
00228                   OptionBase::buildoption, 
00229                   "Used only in the stable_cross_entropy cost function, to fight overfitting (0<=r<1)\n");  
00230     
00231     declareOption(ol, "regularizer", &DeepFeatureExtractorNNet::regularizer, 
00232                   OptionBase::buildoption, 
00233                   "Used in the stable_cross_entropy cost function for the hidden activations, in the unsupervised stages (0<=r<1)\n");  
00234     
00235     declareOption(ol, "margin", &DeepFeatureExtractorNNet::margin, 
00236                   OptionBase::buildoption, 
00237                   "Margin requirement, used only with the \n"
00238                   "margin_perceptron_cost cost function.\n"
00239                   "It should be positive, and larger values regularize more.\n");
00240     
00241     declareOption(ol, "initialization_method", 
00242                   &DeepFeatureExtractorNNet::initialization_method, 
00243                   OptionBase::buildoption, 
00244                   "The method used to initialize the weights:\n"
00245                   " - \"normal_linear\"  = a normal law with variance 1/n_inputs\n"
00246                   " - \"normal_sqrt\"    = a normal law with variance"
00247                   "1/sqrt(n_inputs)\n"
00248                   " - \"uniform_linear\" = a uniform law in [-1/n_inputs, "
00249                   "1/n_inputs]\n"
00250                   " - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(n_inputs), "
00251                   "1/sqrt(n_inputs)]\n"
00252                   " - \"zero\"           = all weights are set to 0\n");
00253     
00254     declareOption(ol, "paramsvalues", &DeepFeatureExtractorNNet::paramsvalues, 
00255                   OptionBase::learntoption, 
00256                   "The learned parameter vector\n");
00257     declareOption(ol, "noutputs", &DeepFeatureExtractorNNet::noutputs, 
00258                   OptionBase::buildoption, 
00259                   "Number of output units. This gives this learner \n"
00260                   "its outputsize. It is typically of the same dimensionality\n"
00261                   "as the target for regression problems.\n"
00262                   "But for classification problems where the target is just\n"
00263                   "the class number, noutputs is usually equal to the\n"
00264                   "number of classes (as we want to output a score or\n"
00265                   "probability vector, one per class)\n");    
00266 
00267     declareOption(ol, "use_same_input_and_output_weights", 
00268                   &DeepFeatureExtractorNNet::use_same_input_and_output_weights, 
00269                   OptionBase::buildoption, 
00270                   "Use the same weights for the input and output weights for\n"
00271                   "the autoassociators.");  
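          // With tied weights, the reconstruction (decoding) layers reuse the
          // transpose of these encoding weight matrices and only separate bias
          // vectors are learned (see the tied-weights branches in build_()).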
00272 
00273     declareOption(ol, "always_reconstruct_input", 
00274                   &DeepFeatureExtractorNNet::always_reconstruct_input, 
00275                   OptionBase::buildoption, 
00276                   "Always use the reconstruction cost of the input, not of\n"
00277                   "the last layer. This option should be used if\n"
00278                   "use_same_input_and_output_weights is true.");  
00279 
00280     declareOption(ol, "use_activations_with_cubed_input", 
00281                   &DeepFeatureExtractorNNet::use_activations_with_cubed_input, 
00282                   OptionBase::buildoption, 
00283                   "Use the cubed value of the input of the activation functions\n"
00284                   "(not used for reconstruction/auto-associator layers and\n"
00285                   " output layer).\n");
00286 
00287     declareOption(ol, "use_n_first_as_supervised", 
00288                   &DeepFeatureExtractorNNet::use_n_first_as_supervised, 
00289                   OptionBase::buildoption, 
00290                   "Number of first examples to use as the supervised part (to simulate semi-supervised learning).");
00291 
00292     declareOption(ol, "use_only_supervised_part", 
00293                   &DeepFeatureExtractorNNet::use_only_supervised_part, 
00294                   OptionBase::buildoption, 
00295                   "Indication that only the supervised part should be\n"
00296                   "used, throughout the whole training, when simulating\n"
00297                   "semi-supervised learning.");
00298 
00299     declareOption(ol, "relative_minimum_improvement", 
00300                   &DeepFeatureExtractorNNet::relative_minimum_improvement,
00301                   OptionBase::buildoption, 
00302                   "Threshold on training set error relative improvement,\n"
00303                   "before adding a new layer. If < 0, then the addition\n"
00304                   "of layers must be done by the user." );
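          // For example, with a value of 0.01 the current phase keeps training
          // until an epoch improves the training cost by less than 1% relative
          // to the previous epoch; the next layer is then added automatically
          // (see the stopping test in train()).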
00305 
00306     declareOption(ol, "autoassociator_regularisation_weight", 
00307                   &DeepFeatureExtractorNNet::autoassociator_regularisation_weight,
00308                   OptionBase::buildoption, 
00309                   "Weight of autoassociator regularisation terms\n"
00310                   "in the fine-tuning phase.\n"
00311                   "If it is equal to 0,\n"
00312                   "then the unsupervised signal is ignored.\n");
00313 
00314      declareOption(ol, "input_reconstruction_error", 
00315                   &DeepFeatureExtractorNNet::input_reconstruction_error,
00316                   OptionBase::buildoption, 
00317                    "Input reconstruction error. The reconstruction error\n"
00318                    "of the hidden layers will always be \"cross_entropy\".\n"
00319                    "Choose among:\n"
00320                    "  - \"cross_entropy\" (default)\n"
00321                    "  - \"mse\" \n");
00322 
00323      declareOption(ol, "supervised_signal_weight", 
00324                   &DeepFeatureExtractorNNet::supervised_signal_weight,
00325                   OptionBase::buildoption, 
00326                    "Weight of supervised signal used in addition\n"
00327                   "to unsupervised signal in greedy phase.\n"
00328                   "This weight should be in [0,1]. If it is equal\n"
00329                   "to 0, then the supervised signal is ignored.\n"
00330                   "If it is equal to 1, then the unsupervised signal\n"
00331                   "is ignored.\n");
00332 
00333      declareOption(ol, "k_nearest_neighbors_reconstruction", 
00334                   &DeepFeatureExtractorNNet::k_nearest_neighbors_reconstruction,
00335                   OptionBase::buildoption, 
00336                    "Number of nearest neighbors to reconstruct in greedy phase.");
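          // When >= 0, the reconstruction cost compares the reconstruction to
          // the example and its k nearest neighbors (computed in input space
          // for the first layer, then in the learned feature space; see
          // train()). This requires relative_minimum_improvement > 0.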
00337     // Now call the parent class' declareOptions
00338     inherited::declareOptions(ol);
00339 }
00340 
00341 void DeepFeatureExtractorNNet::build_()
00342 {
00343     /*
00344      * Create Topology Var Graph
00345      */
00346 
00347     // nhidden_schedule_position's maximum value is nhidden_schedule.length()+1,
00348     // which means that the network is in its fine-tuning phase.
00349     if(nhidden_schedule_position > nhidden_schedule.length()+1)
00350         nhidden_schedule_position = nhidden_schedule.length()+1;
00351 
00352     // Don't do anything if we don't have a train_set
00353     // It's the only one who knows the inputsize and targetsize anyway...
00354     // Also, nothing is done if no layers need to be added
00355     if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0 
00356        && nhidden_schedule_current_position < nhidden_schedule.length()+1 
00357        && nhidden_schedule_current_position < nhidden_schedule_position)
00358     {
00359 
00360         if(use_n_first_as_supervised > 0)
00361             sup_train_set = train_set.subMatRows(0,use_n_first_as_supervised);
00362 
00363         // Initialize the input.
00364         if(nhidden_schedule_current_position < 0)
00365         {
00366             input = Var(inputsize(), "input");
00367             output = input;
00368             weights.resize(0);
00369             reconstruction_weights.resize(0);
00370             params.resize(0);
00371             biases.resize(0);
00372             if(use_same_input_and_output_weights)
00373             {
00374                 Var b = new SourceVariable(1,inputsize());
00375                 b->setName("b0");
00376                 b->value.clear();
00377                 biases.push_back(b);
00378             }
00379             if (seed_ != 0) random_gen->manual_seed(seed_);
00380             if(autoassociator_regularisation_weight > 0) 
00381             {
00382                 autoassociator_training_costs.resize(nhidden_schedule.length());
00383                 autoassociator_params.resize(nhidden_schedule.length());
00384             }
00385         }
00386 
00387         feature_vector = hidden_representation;
00388 
00389         if(nhidden_schedule_current_position < nhidden_schedule_position)
00390         {
00391             // Update the network's topology
00392             if(nhidden_schedule_current_position < nhidden_schedule.length()
00393                && nhidden_schedule_current_position>=0)
00394                 output = hidden_representation;
00395 
00396             Var before_transfer_function;
00397             params_to_train.resize(0);  // Will now train new set of weights
00398 
00399             // Will reconstruct input ...
00400             if(nhidden_schedule_current_position < 0 || always_reconstruct_input)
00401             {
00402                 if(k_nearest_neighbors_reconstruction>=0)
00403                     unsupervised_target = 
00404                         Var((k_nearest_neighbors_reconstruction+1)*inputsize());
00405                 else
00406                     unsupervised_target = input;
00407             }
00408             else // ... or will reconstruct last hidden layer
00409             {
00410                 if(k_nearest_neighbors_reconstruction>=0)
00411                     unsupervised_target = 
00412                         Var((k_nearest_neighbors_reconstruction+1)
00413                             *nhidden_schedule[nhidden_schedule_current_position]);
00414                 else
00415                     unsupervised_target = hidden_representation;
00416             }
00417 
00418             // Number of hidden layers added
00419             int n_added_layers = 0;
00420 
00421             if((nhidden_schedule_position < nhidden_schedule.length() 
00422                 && supervised_signal_weight != 1) && 
00423                use_same_input_and_output_weights)
00424             {
00425                 params_to_train.push_back(biases.last());
00426             }
00427         
00428             // Add new hidden layers until schedule position is reached
00429             // or all hidden layers have been added
00430             while(nhidden_schedule_current_position < nhidden_schedule_position 
00431                   && nhidden_schedule_current_position+1 < 
00432                   nhidden_schedule.length())
00433             {
00434                 nhidden_schedule_current_position++;
00435                 n_added_layers++;
00436                 Var w;
00437 
00438                 // Share layer and reconstruction weights ...
00439                 if(use_same_input_and_output_weights)
00440                 {
00441                     // Weights
00442                     Var w_weights = new SourceVariable(
00443                         output->size(),
00444                         nhidden_schedule[nhidden_schedule_current_position]);
00445                     w_weights->setName("w" + tostring(nhidden_schedule_current_position+1));
00446                     weights.push_back(w_weights);
00447                     fillWeights(w_weights,false);
00448                     params.push_back(w_weights);
00449                     params_to_train.push_back(w_weights);
00450 
00451                     // Bias
00452                     Var w_biases = new SourceVariable(
00453                         1,nhidden_schedule[nhidden_schedule_current_position]);
00454                     w_biases->setName("b" + tostring(nhidden_schedule_current_position+1));
00455                     biases.push_back(w_biases);
00456                     w_biases->value.clear();
00457                     params.push_back(w_biases);
00458                     params_to_train.push_back(w_biases);
00459 
00460                     //w = vconcat(w_biases & w_weights);
00461                     output = hiddenLayer(
00462                         output,w_weights,w_biases,false,"sigmoid",
00463                         before_transfer_function,use_activations_with_cubed_input);
00464                     //output = hiddenLayer(
00465                     //    output,w,"sigmoid",
00466                     //    before_transfer_function,use_activations_with_cubed_input);
00467                 }
00468                 else // ... or have different set of weights.
00469                 {
00470                     // Weights and bias
00471                     w = new SourceVariable(
00472                         output->size()+1,
00473                         nhidden_schedule[nhidden_schedule_current_position]);
00474                     w->setName("wb" + tostring(nhidden_schedule_current_position+1));
00475                     weights.push_back(w);
00476                     fillWeights(w,true,0);            
00477                     params.push_back(w);
00478                     params_to_train.push_back(w);
00479                     output = hiddenLayer(
00480                         output,w,"sigmoid",
00481                         before_transfer_function,use_activations_with_cubed_input);
00482                 }
00483 
00484                 hidden_representation = output;
00485             }
00486 
00487             // Add supervised layer, when all hidden layers have been trained
00488             // or when a supervised target is also used in the greedy phase.
00489         
00490             if(supervised_signal_weight < 0 || supervised_signal_weight > 1)
00491                 PLERROR("In DeepFeatureExtractorNNet::build_(): "
00492                         "supervised_signal_weight should be in [0,1]");
00493 
00494             Var output_sup;
00495             if(nhidden_schedule_position < nhidden_schedule.length() 
00496                && supervised_signal_weight > 0)
00497                 output_sup = output;
00498 
00499             if(nhidden_schedule_current_position < nhidden_schedule_position)
00500                 nhidden_schedule_current_position++;
00501 
00502             if(output_sup || 
00503                nhidden_schedule_current_position == nhidden_schedule.length())
00504             {
00505                 if(noutputs<=0) 
00506                     PLERROR("In DeepFeatureExtractorNNet::build_(): "
00507                             "building the output layer but noutputs<=0");
00508 
00509                 Var w = new SourceVariable(output->size()+1,noutputs);
00510                 w->setName("wbout");
00511                 fillWeights(w,true,0);
00512             
00513                 // If all hidden layers have been added, these weights
00514                 // can be added to the network
00515                 if(nhidden_schedule_current_position == nhidden_schedule.length())
00516                 {
00517                     params.push_back(w);
00518                     weights.push_back(w);
00519                 }
00520 
00521                 params_to_train.push_back(w);
00522                 if(output_sup)
00523                     output_sup = hiddenLayer(
00524                         output_sup,w,
00525                         output_transfer_func,before_transfer_function);
00526                 else
00527                     output = hiddenLayer(output,w,
00528                                          output_transfer_func,
00529                                          before_transfer_function);            
00530             }
00531 
00532             if(nhidden_schedule_current_position < nhidden_schedule_position)
00533                 nhidden_schedule_current_position++;            
00534 
00535             if(nhidden_schedule_current_position == nhidden_schedule.length()+1)
00536             {
00537                 params_to_train.resize(0);
00538                 // Fine-tune the whole network
00539                 for(int i=0; i<params.length(); i++)
00540                     params_to_train.push_back(params[i]);
00541             }
00542 
00543             // Add reconstruction/auto-associator layer
00544             reconstruction_weights.resize(0);
00545             if(supervised_signal_weight != 1 
00546                && nhidden_schedule_current_position < nhidden_schedule.length())
00547             {
00548                 int it = 0;
00549                 // Add reconstruction/auto-associator layers until last layer
00550                 // is reached, or until input reconstruction is reached
00551                 // if always_reconstruct_input is true
00552                 string rec_trans_func = "some_transfer_func";
00553                 while((!always_reconstruct_input && n_added_layers > 0) 
00554                       || (always_reconstruct_input && it<weights.size()))
00555                 {                    
00556                     n_added_layers--;
00557                     it++;                
00558 
00559                     if((always_reconstruct_input 
00560                         && nhidden_schedule_current_position-it == -1) 
00561                        || nhidden_schedule_current_position == 0)
00562                     {
00563                         if(input_reconstruction_error == "cross_entropy")
00564                             rec_trans_func = "sigmoid";
00565                         else if (input_reconstruction_error == "mse")
00566                             rec_trans_func = "linear";
00567                         else PLERROR("In DeepFeatureExtractorNNet::build_(): %s "
00568                                      "is not a valid reconstruction error", 
00569                                      input_reconstruction_error.c_str());
00570                     }
00571                     else
00572                         rec_trans_func = "sigmoid";
00573 
00574                     if(use_same_input_and_output_weights)
00575                     {
00576                         output =  hiddenLayer(
00577                             output,weights[weights.size()-it],
00578                             biases[biases.size()-it-1], 
00579                             true, rec_trans_func,
00580                             before_transfer_function,
00581                             use_activations_with_cubed_input);
00582                         //output =  hiddenLayer(
00583                         //    output, 
00584                         //    vconcat(biases[biases.size()-it-1]
00585                         //            & transpose(weights[weights.size()-it])),
00586                         //    rec_trans_func,
00587                         //    before_transfer_function,
00588                         //    use_activations_with_cubed_input);
00589                     }
00590                     else
00591                     {
00592                         Var rw;
00593                         if(nhidden_schedule_current_position-it == -1)
00594                             rw  = new SourceVariable(output->size()+1,inputsize());
00595                         else
00596                             rw  = new SourceVariable(
00597                                 output->size()+1,
00598                                 nhidden_schedule[
00599                                     nhidden_schedule_current_position-it]);
00600                         reconstruction_weights.push_back(rw);
00601                         rw->setName("rwb" + tostring(nhidden_schedule_current_position-it+1));
00602                         fillWeights(rw,true,0);
00603                         params_to_train.push_back(rw);
00604                         output =  hiddenLayer(
00605                             output,rw, rec_trans_func,
00606                             before_transfer_function,
00607                             use_activations_with_cubed_input);
00608                     }                
00609                 }         
00610             }
00611 
00612             // Build target and weight variables.
00613             buildTargetAndWeight();
00614 
00615             // Build costs.
00616             string pt = lowerstring( penalty_type );
00617             if( pt == "l1" )
00618                 penalty_type = "L1";
00619             //else if( pt == "l1_square" || pt == "l1 square" || pt == "l1square" )
00620             //    penalty_type = "L1_square";
00621             else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
00622                 penalty_type = "L2_square";
00623             else if( pt == "l2" )
00624             {
00625                 PLWARNING("L2 penalty not supported, assuming you want L2 square");
00626                 penalty_type = "L2_square";
00627             }
00628             else
00629                 PLERROR("penalty_type \"%s\" not supported", penalty_type.c_str());
00630 
00631             buildCosts(output, target, 
00632                        unsupervised_target, before_transfer_function, output_sup);
00633         
00634             // Build functions.
00635             buildFuncs(input, output, target, sampleweight);
00636 
00637         }
00638         
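              // If a learnt parameter vector of the right size is available,
              // load it; otherwise allocate fresh storage. makeSharedValue()
              // then makes every parameter Var store its values directly in
              // paramsvalues, so the whole parameter vector is saved and
              // reloaded through this single learnt option.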
00639         if((bool)paramsvalues && (paramsvalues.size() == params.nelems()))
00640             params << paramsvalues;
00641         else
00642             paramsvalues.resize(params.nelems());
00643         params.makeSharedValue(paramsvalues);
00644         
00645         // Reinitialize the optimization phase
00646         if(optimizer)
00647             optimizer->reset();
00648         if(optimizer_supervised)
00649             optimizer_supervised->reset();
00650         stage = 0;
00651     }
00652 }
00653 
00654 // ### Nothing to add here, simply calls build_
00655 void DeepFeatureExtractorNNet::build()
00656 {
00657     inherited::build();
00658     build_();
00659 }
00660 
00661 
00662 void DeepFeatureExtractorNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00663 {
00664     inherited::makeDeepCopyFromShallowCopy(copies);
00665 
00666     // Public
00667 
00668     deepCopyField(nhidden_schedule, copies);
00669     deepCopyField(optimizer, copies);
00670     deepCopyField(optimizer_supervised, copies);
00671     deepCopyField(cost_funcs, copies);
00672     deepCopyField(paramsvalues, copies);
00673 
00674     // Protected
00675 
00676     deepCopyField(params, copies);
00677     deepCopyField(params_to_train, copies);
00678     deepCopyField(weights, copies);
00679     deepCopyField(reconstruction_weights, copies);
00680     deepCopyField(biases, copies);
00681     deepCopyField(invars, copies);
00682     varDeepCopyField(input, copies);
00683     varDeepCopyField(output, copies);
00684     varDeepCopyField(feature_vector, copies);
00685     varDeepCopyField(hidden_representation, copies);
00686     varDeepCopyField(neighbor_indices, copies);
00687     varDeepCopyField(target, copies);
00688     varDeepCopyField(unsupervised_target, copies);
00689     varDeepCopyField(sampleweight, copies);
00690     deepCopyField(costs, copies);
00691     deepCopyField(penalties, copies);
00692     varDeepCopyField(training_cost, copies);
00693     varDeepCopyField(test_costs, copies);
00694     deepCopyField(sup_train_set, copies);
00695     deepCopyField(unsup_train_set, copies);
00696     deepCopyField(knn_train_set, copies);
00697     deepCopyField(f, copies);
00698     deepCopyField(test_costf, copies);
00699     deepCopyField(output_and_target_to_cost, copies);
00700     deepCopyField(to_feature_vector, copies);
00701     deepCopyField(autoassociator_params, copies);
00702     deepCopyField(autoassociator_training_costs, copies);
00703 
00704     
00705 
00706     //PLERROR("DeepFeatureExtractorNNet::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
00707 }
00708 
00709 
00710 int DeepFeatureExtractorNNet::outputsize() const
00711 {
00712     if(output)
00713         return output->size();
00714     else
00715         return 0;
00716 }
00717 
00718 void DeepFeatureExtractorNNet::forget()
00719 {
00720     if(optimizer)
00721         optimizer->reset();
00722     if(optimizer_supervised)
00723         optimizer_supervised->reset();
00724     stage = 0;
00725     
00726     params.resize(0);
00727     weights.resize(0);
00728     nhidden_schedule_current_position = -1;
00729     build();
00730 }
00731     
00732 void DeepFeatureExtractorNNet::train()
00733 {
00734     if(!train_set)
00735         PLERROR("In DeepFeatureExtractorNNet::train, you did not setTrainingSet");
00736     
00737     if(!train_stats)
00738         PLERROR("In DeepFeatureExtractorNNet::train, you did not setTrainStatsCollector");
00739 
00740     // k nearest neighbors prediction
00741     if(k_nearest_neighbors_reconstruction>=0 
00742        && nhidden_schedule_current_position < nhidden_schedule.length())
00743     {
00744         if(relative_minimum_improvement <= 0)
00745             PLERROR("In DeepFeatureExtractorNNet::train(): "
00746                     "relative_minimum_improvement needs to be > 0 when "
00747                     "using nearest neighbors reconstruction");
00748         if(nhidden_schedule_current_position==0) 
00749         {
00750             // Compute nearest neighbors in input space
00751             if(verbosity > 2) cout << "Computing nearest neighbors" << endl;
00752             knn_train_set = new AppendNeighborsVMatrix();
00753             knn_train_set->source = train_set;
00754             knn_train_set->n_neighbors = k_nearest_neighbors_reconstruction;
00755             knn_train_set->append_neighbor_indices = false;
00756             knn_train_set->build();
00757             unsup_train_set = (VMatrix*) knn_train_set;
00758             if(verbosity > 2) cout << "Done" << endl;
00759 
00760             // Append input
00761             unsup_train_set = hconcat(
00762                 new GetInputVMatrix(train_set),unsup_train_set);
00763             unsup_train_set->defineSizes(train_set->inputsize()*
00764                                          (k_nearest_neighbors_reconstruction+2),
00765                                          train_set->targetsize(),
00766                                          train_set->weightsize()); 
00767         }
00768         else
00769         {
00770             // Compute nearest neighbors in feature (hidden layer) space
00771             if(verbosity > 2) cout << "Computing nearest neighbors and performing transformation to hidden representation" << endl;
00772             knn_train_set->transformation =  to_feature_vector;
00773             knn_train_set->defineSizes(-1,-1,-1);
00774             knn_train_set->build();
00775             unsup_train_set = (VMatrix *)knn_train_set;
00776             if(verbosity > 2) cout << "Done" << endl;
00777 
00778             int feat_size = to_feature_vector->outputsize;
00779             // Append input
00780             unsup_train_set = hconcat(
00781                 new GetInputVMatrix(train_set),unsup_train_set);
00782             unsup_train_set->defineSizes(
00783                 train_set->inputsize()
00784                 +feat_size*(k_nearest_neighbors_reconstruction+1),
00785                 train_set->targetsize(),train_set->weightsize());            
00786         }
00787 
00788     }
00789 
00790 
00791     int l;
00792     if(sup_train_set && 
00793        (supervised_signal_weight == 1
00794         || nhidden_schedule_current_position >= nhidden_schedule.length()))
00795         l = sup_train_set->length();  
00796     else
00797         if(unsup_train_set 
00798            && nhidden_schedule_current_position < nhidden_schedule.length())
00799             l = unsup_train_set->length();  
00800         else
00801             l = train_set->length();
00802 
00803     // Net has not been properly built yet 
00804     // (because build was called before the learner had a proper training set)
00805     if(f.isNull()) 
00806         build();
00807 
00808     // Update the DeepFeatureExtractorNNet structure if necessary
00809     if(nhidden_schedule_current_position < nhidden_schedule_position)
00810         build();
00811 
00812     // Number of samples seen by optimizer before each optimizer update
00813     int nsamples;
00814     if(supervised_signal_weight == 1
00815        || nhidden_schedule_current_position >= nhidden_schedule.length())
00816         nsamples = batch_size_supervised>0 ? batch_size_supervised : l;        
00817     else
00818         nsamples = batch_size>0 ? batch_size : l;
00819 
00820 
00821     // Parameterized function to optimize
00822     Func paramf = Func(invars, training_cost); 
00823     Var totalcost;
00824     
00825     if(sup_train_set 
00826        && (supervised_signal_weight == 1
00827            || nhidden_schedule_current_position >= nhidden_schedule.length()))
00828         totalcost = meanOf(sup_train_set,paramf,nsamples);
00829     else
00830         if(unsup_train_set 
00831            && nhidden_schedule_current_position < nhidden_schedule.length())
00832             totalcost = meanOf(unsup_train_set, paramf, nsamples);
00833         else            
00834             totalcost = meanOf(train_set, paramf, nsamples);
00835 
00836     PP<Optimizer> this_optimizer;
00837 
00838     if(optimizer_supervised 
00839        && nhidden_schedule_current_position >= nhidden_schedule.length())
00840     {
00841         if(nhidden_schedule_current_position == nhidden_schedule.length()+1
00842            && autoassociator_regularisation_weight>0)
00843         {            
00844             optimizer_supervised->setToOptimize(
00845                 params_to_train, totalcost, autoassociator_training_costs, 
00846                 autoassociator_params, 
00847                 autoassociator_regularisation_weight);
00848         }
00849         else
00850             optimizer_supervised->setToOptimize(params_to_train, totalcost);
00851         optimizer_supervised->build();
00852         this_optimizer = optimizer_supervised;
00853     }
00854     else if(optimizer)
00855     {
00856         if(nhidden_schedule_current_position == nhidden_schedule.length()+1
00857            && autoassociator_regularisation_weight>0)
00858             optimizer->setToOptimize(
00859                 params_to_train, totalcost, autoassociator_training_costs, 
00860                 autoassociator_params, autoassociator_regularisation_weight);
00861         else
00862             optimizer->setToOptimize(params_to_train, totalcost);
00863 
00864         optimizer->build();
00865         this_optimizer = optimizer;
00866     }
00867     else PLERROR("DeepFeatureExtractorNNet::train can't train without setting "
00868                  "an optimizer first!");
00869 
00870     // Number of optimizer stages corresponding to one learner stage (one epoch)
00871     int optstage_per_lstage = l/nsamples;
00872 
00873     PP<ProgressBar> pb;
00874     if(report_progress)
00875         pb = new ProgressBar("Training " + classname() + " from stage " 
00876                              + tostring(stage) + " to " + tostring(nstages), 
00877                              nstages-stage);
00878 
00879     //displayFunction(paramf, true, false, 250);
00880     //cout << params_to_train.size() << " params to train" << endl;
00881     //cout << params.size() << " params" << endl;
00882     int initial_stage = stage;
00883     real last_error = REAL_MAX;
00884     real this_error = 0;
00885     Vec stats;
00886     bool flag = (relative_minimum_improvement >= 0 
00887                  && nhidden_schedule_current_position <= nhidden_schedule.length());
00888 
00889     if(verbosity>2) cout << "Training layer " 
00890                          << nhidden_schedule_current_position+1 << endl;
00891 
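          // When relative_minimum_improvement >= 0, 'flag' keeps the current
          // greedy phase running regardless of nstages; the phase only ends
          // when the relative-improvement test below breaks out of the loop.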
00892     while((stage<nstages || flag))
00893     {
00894         this_optimizer->nstages = optstage_per_lstage;
00895         train_stats->forget();
00896         this_optimizer->early_stop = false;
00897         this_optimizer->optimizeN(*train_stats);
00898         // Uncomment the following if you want to check your new Var.
00899         // optimizer->verifyGradient(1e-4); 
00900         train_stats->finalize();
00901         stats = train_stats->getMean();
00902         if(verbosity>2)
00903         {
00904             if(flag)
00905                 cout << "Initialization epoch, reconstruction train objective: " 
00906                      << stats << endl;
00907             else
00908                 cout << "Epoch " << stage << " train objective: " << stats << endl;
00909         }
00910         if(pb)
00911             pb->update(stage-initial_stage);
00912 
00913         this_error = stats[stats.length()-2];
00914         if(flag 
00915            && last_error - this_error < relative_minimum_improvement * last_error) 
00916             break;
00917         if(!flag) ++stage;
00918         last_error = this_error;
00919     }
00920     if(verbosity>1)
00921         cout << "EPOCH " << stage << " train objective: " 
00922              << train_stats->getMean() << endl;
00923 
00924     output_and_target_to_cost->recomputeParents();
00925     test_costf->recomputeParents();
00926     
00927     if(relative_minimum_improvement >= 0 
00928        && nhidden_schedule_current_position <= nhidden_schedule.length())
00929     {
00930         nhidden_schedule_position++;
00931         totalcost = 0;
00932         build();
00933         train();
00934     }
00936 }
00937 
00938 void DeepFeatureExtractorNNet::computeOutput(const Vec& input, Vec& output) const
00939 {
00940     output.resize(outputsize());
00941     f->fprop(input,output);
00942 }    
00943 
00944 void DeepFeatureExtractorNNet::computeCostsFromOutputs(const Vec& input, 
00945                                                        const Vec& output, 
00946                                            const Vec& target, Vec& costs) const
00947 {
00948 #ifdef BOUNDCHECK
00949     // Stable cross entropy needs the value *before* the transfer function.
00950     if (cost_funcs.contains("stable_cross_entropy"))
00951         PLERROR("In DeepFeatureExtractorNNet::computeCostsFromOutputs - Cannot directly "
00952                 "compute stable cross entropy from output and target");
00953 #endif
00954     output_and_target_to_cost->fprop(output&target, costs); 
00955 }
00956 
00957 void DeepFeatureExtractorNNet::computeOutputAndCosts(const Vec& inputv, 
00958                                                      const Vec& targetv, 
00959                                                      Vec& outputv, 
00960                                                      Vec& costsv) const
00961 {
00962     outputv.resize(outputsize());
00963     test_costf->fprop(inputv&targetv, outputv&costsv);
00964 }
00965 
00966 TVec<string> DeepFeatureExtractorNNet::getTestCostNames() const
00967 {
00968     TVec<string> costs_str = cost_funcs.copy();
00969     costs_str.push_back("reconstruction_error");
00970     costs_str.push_back("nhidden_schedule_current_position");
00971     return costs_str;
00972 }
00973 
00974 TVec<string> DeepFeatureExtractorNNet::getTrainCostNames() const
00975 {
00976     return getTestCostNames();
00977 }
00978 
00979 void DeepFeatureExtractorNNet::buildTargetAndWeight() {
00980     if(targetsize() > 0)
00981     {        
00982         target = Var(targetsize(), "target");
00983         if(weightsize_>0)
00984         {
00985             if (weightsize_!=1)
00986                 PLERROR("In DeepFeatureExtractorNNet::buildTargetAndWeight - Expected weightsize to "
00987                         "be 1 or 0 (or unspecified = -1, meaning 0), got %d",
00988                         weightsize_);
00989             sampleweight = Var(1, "weight");
00990         }
00991     }
00992 }
00993 
00994 void DeepFeatureExtractorNNet::buildCosts(const Var& the_output, 
00995                                           const Var& the_target, 
00996                                           const Var& the_unsupervised_target, 
00997                                           const Var& before_transfer_func, 
00998                                           const Var& output_sup) 
00999 {
01000     costs.resize(0);
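          // Whichever branch is taken below, 'costs' ends up with
          // cost_funcs.size()+2 entries: the supervised costs (or REAL_MAX
          // placeholders), the reconstruction cost (or a placeholder), and the
          // negated schedule position, matching getTestCostNames().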
01001 
01002     // If in a mainly supervised phase ...
01003     if(nhidden_schedule_current_position >= nhidden_schedule.length())
01004     {
01005 
01006         // ... add supervised costs ...
01007         int ncosts = cost_funcs.size();  
01008         costs.resize(ncosts);
01009         
01010         for(int k=0; k<ncosts; k++)
01011         {
01012             // create costfuncs and apply individual weights if weightpart > 1
01013             if(cost_funcs[k]=="mse")
01014                 costs[k]= sumsquare(the_output-the_target);
01015             else if(cost_funcs[k]=="mse_onehot")
01016                 costs[k] = onehot_squared_loss(the_output, the_target);
01017             else if(cost_funcs[k]=="NLL") 
01018             {
01019                 if (the_output->size() == 1) {
01020                     // Assume sigmoid output here!
01021                     costs[k] = cross_entropy(the_output, the_target);
01022                 } else {
01023                     if (output_transfer_func == "log_softmax")
01024                         costs[k] = -the_output[the_target];
01025                     else
01026                         costs[k] = neg_log_pi(the_output, the_target);
01027                 }
01028             } 
01029             else if(cost_funcs[k]=="class_error")
01030                 costs[k] = classification_loss(the_output, the_target);
01031             else if(cost_funcs[k]=="binary_class_error")
01032                 costs[k] = binary_classification_loss(the_output, the_target);
01033             else if(cost_funcs[k]=="multiclass_error")
01034                 costs[k] = multiclass_loss(the_output, the_target);
01035             else if(cost_funcs[k]=="cross_entropy")
01036                 costs[k] = cross_entropy(the_output, the_target);
01037             else if (cost_funcs[k]=="stable_cross_entropy") {
01038                 Var c = stable_cross_entropy(before_transfer_func, the_target);
01039                 costs[k] = c;
01040                 PLASSERT( classification_regularizer >= 0 );
01041                 if (classification_regularizer > 0) {
01042                     // There is a regularizer to add to the cost function.
01043                     dynamic_cast<NegCrossEntropySigmoidVariable*>((Variable*) c)->
01044                         setRegularizer(classification_regularizer);
01045                 }
01046             }
01047             else if (cost_funcs[k]=="margin_perceptron_cost")
01048                 costs[k] = margin_perceptron_cost(the_output,the_target,margin);
01049             else if (cost_funcs[k]=="lift_output")
01050                 costs[k] = lift_output(the_output, the_target);
01051             else  // Assume we got a Variable name and its options
01052             {
01053                 costs[k]= dynamic_cast<Variable*>(newObject(cost_funcs[k]));
01054                 if(costs[k].isNull())
01055                     PLERROR("In DeepFeatureExtractorNNet::buildCosts() unknown cost_func option: %s",
01056                             cost_funcs[k].c_str());
01057                 costs[k]->setParents(the_output & the_target);
01058                 costs[k]->build();
01059             }
01060         }
01061 
01062         // ... and unsupervised cost, which is useless here 
01063         //     (autoassociator regularisation is incorporated elsewhere, in train())
01064         Vec val(1);
01065         val[0] = REAL_MAX;
01066         costs.push_back(new SourceVariable(val));
01067     }
01068     else // If in a mainly unsupervised phase ...
01069     {
01070         // ... insert supervised cost if supervised_signal_weight > 0 ...
01071         if(output_sup)
01072         {            
01073             int ncosts = cost_funcs.size();  
01074             costs.resize(ncosts);
01075         
01076             for(int k=0; k<ncosts; k++)
01077             {
01078                 // create costfuncs and apply individual weights if weightpart > 1
01079                 if(cost_funcs[k]=="mse")
01080                     costs[k]= sumsquare(output_sup-the_target);
01081                 else if(cost_funcs[k]=="mse_onehot")
01082                     costs[k] = onehot_squared_loss(output_sup, the_target);
01083                 else if(cost_funcs[k]=="NLL") 
01084                 {
01085                     if (output_sup->size() == 1) {
01086                         // Assume sigmoid output here!
01087                         costs[k] = cross_entropy(output_sup, the_target);
01088                     } else {
01089                         if (output_transfer_func == "log_softmax")
01090                             costs[k] = -output_sup[the_target];
01091                         else
01092                             costs[k] = neg_log_pi(output_sup, the_target);
01093                     }
01094                 } 
01095                 else if(cost_funcs[k]=="class_error")
01096                     costs[k] = classification_loss(output_sup, the_target);
01097                 else if(cost_funcs[k]=="binary_class_error")
01098                     costs[k] = binary_classification_loss(output_sup, the_target);
01099                 else if(cost_funcs[k]=="multiclass_error")
01100                     costs[k] = multiclass_loss(output_sup, the_target);
01101                 else if(cost_funcs[k]=="cross_entropy")
01102                     costs[k] = cross_entropy(output_sup, the_target);
01103                 else if (cost_funcs[k]=="stable_cross_entropy") {
01104                     Var c = stable_cross_entropy(before_transfer_func, the_target);
01105                     costs[k] = c;
01106                     PLASSERT( classification_regularizer >= 0 );
01107                     if (classification_regularizer > 0) {
01108                         // There is a regularizer to add to the cost function.
01109                         dynamic_cast<NegCrossEntropySigmoidVariable*>((Variable*) c)->
01110                             setRegularizer(classification_regularizer);
01111                     }
01112                 }
01113                 else if (cost_funcs[k]=="margin_perceptron_cost")
01114                     costs[k] = margin_perceptron_cost(output_sup,the_target,margin);
01115                 else if (cost_funcs[k]=="lift_output")
01116                     costs[k] = lift_output(output_sup, the_target);
01117                 else  // Assume we got a Variable name and its options
01118                 {
01119                     costs[k]= dynamic_cast<Variable*>(newObject(cost_funcs[k]));
01120                     if(costs[k].isNull())
01121                         PLERROR("In DeepFeatureExtractorNNet::buildCosts() unknown cost_func option: %s",cost_funcs[k].c_str());
01122                     costs[k]->setParents(output_sup & the_target);
01123                     costs[k]->build();
01124                 }
01125 
01126                 costs[k] = supervised_signal_weight*costs[k];
01127             }                    
01128         }
01129         else // ... otherwise insert useless maximum cost variables ...
01130         {
01131             int ncosts = cost_funcs.size();  
01132             costs.resize(ncosts);
01133             Vec val(1);
01134             val[0] = REAL_MAX;
01135             for(int i=0; i<costs.length(); i++)
01136                 costs[i] = new SourceVariable(val);
01137         }
01138         Var c;
01139 
01140         // ... then insert appropriate unsupervised reconstruction cost ...
01141         if(supervised_signal_weight == 1) // ... unless only using supervised signal.
01142         {
01143             Vec val(1);
01144             val[0] = REAL_MAX;
01145             costs.push_back(new SourceVariable(val));
01146         }
01147         else
01148         {
01149             if(k_nearest_neighbors_reconstruction>=0)
01150             {
01151                 
01152                 VarArray copies(k_nearest_neighbors_reconstruction+1);
01153                 for(int n=0; n<k_nearest_neighbors_reconstruction+1; n++)
01154                 {
01155                     if(always_reconstruct_input || nhidden_schedule_position == 0)
01156                     {
01157                         if(input_reconstruction_error == "cross_entropy")
01158                             copies[n] = before_transfer_func;
01159                         else if (input_reconstruction_error == "mse")
01160                             copies[n] = the_output;
01161                     }
01162                     else
01163                         copies[n] = before_transfer_func;
01164                 }
01165                 
01166                 Var reconstruct = vconcat(copies);
01167                 
01168                 if(always_reconstruct_input || nhidden_schedule_position == 0)
01169                 {
01170                     if(input_reconstruction_error == "cross_entropy")
01171                         c = stable_cross_entropy(reconstruct, the_unsupervised_target);
01172                     else if (input_reconstruction_error == "mse")
01173                         c = sumsquare(reconstruct-the_unsupervised_target);
01174                     else PLERROR("In DeepFeatureExtractorNNet::buildCosts(): %s is not "
01175                                  "a valid reconstruction error", 
01176                                  input_reconstruction_error.c_str());
01177                 }
01178                 else
01179                     c = stable_cross_entropy(reconstruct, the_unsupervised_target);
01180                 
01181             }
01182             else
01183             {
01184                 if(always_reconstruct_input || nhidden_schedule_position == 0)
01185                 {
01186                     if(input_reconstruction_error == "cross_entropy")
01187                         c = stable_cross_entropy(before_transfer_func, 
01188                                                  the_unsupervised_target);
01189                     else if (input_reconstruction_error == "mse")
01190                         c = sumsquare(the_output-the_unsupervised_target);
01191                     else PLERROR("In DeepFeatureExtractorNNet::buildCosts(): %s is not "
01192                                  "a valid reconstruction error", 
01193                                  input_reconstruction_error.c_str());
01194                 }
01195                 else
01196                     c = stable_cross_entropy(before_transfer_func, 
01197                                              the_unsupervised_target);
01198             }
01199         
01200             if(output_sup) c = (1-supervised_signal_weight) * c + costs[0];
01201             costs.push_back(c);
01202         }
01203 
01204         PLASSERT( regularizer >= 0 );
01205         // Add the regularizer to the reconstruction cost, but only when it
01206         // is a stable (sigmoid) cross-entropy variable.
01207         NegCrossEntropySigmoidVariable* stable_c =
01208             dynamic_cast<NegCrossEntropySigmoidVariable*>((Variable*) c);
01209         if (regularizer > 0 && stable_c) stable_c->setRegularizer(regularizer);
01210     }
01211 
01212     // This is so that an EarlyStoppingOracle can be used to
01213     // do early stopping at each layer
01214     Vec pos(1);
01215     pos[0] = -nhidden_schedule_current_position;
01216     costs.push_back(new SourceVariable(pos));
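          // The schedule position is appended as a constant extra "cost";
          // presumably it is negated so that a minimizing oracle sees the value
          // decrease as layers are added.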
01217 
01218     /*
01219      * weight and bias decay penalty
01220      */
01221 
01222     // create penalties
01223     buildPenalties();
01224     test_costs = hconcat(costs);
01225 
01226     // Apply penalty to cost.
01227     // If there is no penalty, we still add costs[0] as the first cost, in
01228     // order to keep the same number of costs as if there was a penalty.
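          // During a greedy unsupervised stage the second-to-last cost (the
          // reconstruction cost) is the one optimized; otherwise (fine-tuning,
          // or when only the supervised signal is used) the first supervised
          // cost is optimized.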
01229     if(penalties.size() != 0) {
01230         // We only multiply by sampleweight if there are weights
01231         // and assign the appropriate training cost.
01232         if (weightsize_>0) {
01233             if(nhidden_schedule_current_position < nhidden_schedule.length() 
01234                && supervised_signal_weight != 1)
01235                 training_cost = hconcat(
01236                     sampleweight*sum(hconcat(costs[costs.length()-2] & penalties))
01237                     & (test_costs*sampleweight));
01238             else
01239                 training_cost = hconcat(
01240                     sampleweight*sum(hconcat(costs[0] & penalties))
01241                     & (test_costs*sampleweight));
01242         } else {
01243             if(nhidden_schedule_current_position < nhidden_schedule.length() 
01244                && supervised_signal_weight != 1)
01245                 training_cost = hconcat(sum(hconcat(costs[costs.length()-2] 
01246                                                     & penalties)) & test_costs);
01247             else
01248                 training_cost = hconcat(sum(hconcat(costs[0] & penalties)) 
01249                                         & test_costs);
01250         }
01251     }
01252     else {
01253         // We only multiply by sampleweight if there are weights
01254         // and assign the appropriate training cost.
01255         if(weightsize_>0) {
01256             if(nhidden_schedule_current_position < nhidden_schedule.length() 
01257                && supervised_signal_weight != 1)
01258                 training_cost = hconcat(costs[costs.length()-2]*sampleweight 
01259                                         & test_costs*sampleweight);
01260             else
01261                 training_cost = hconcat(costs[0]*sampleweight 
01262                                         & test_costs*sampleweight);
01263         } else {
01264             if(nhidden_schedule_current_position < nhidden_schedule.length() 
01265                && supervised_signal_weight != 1)                
01266                 training_cost = hconcat(costs[costs.length()-2] & test_costs);
01267             else
01268                 training_cost = hconcat(costs[0] & test_costs);
01269         }
01270     }
01271 
01272     training_cost->setName("training_cost");
01273     test_costs->setName("test_costs");
01274     the_output->setName("output");
01275 }
01276 
01277 
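      // Builds a hidden layer: an affine transform of the input by 'weights',
      // optionally cubed, followed by the requested transfer function. The
      // pre-transfer-function activation is also returned through
      // 'before_transfer_function'.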
01278 Var DeepFeatureExtractorNNet::hiddenLayer(const Var& input, 
01279                                           const Var& weights, string transfer_func, 
01280                                           Var& before_transfer_function, 
01281                                           bool use_cubed_value) {
01282     Var hidden = affine_transform(input, weights); 
01283     if(use_cubed_value)
01284         hidden = pow(hidden,3);    
01285     before_transfer_function = hidden;
01286     Var result;
01287     if(transfer_func=="linear")
01288         result = hidden;
01289     else if(transfer_func=="tanh")
01290         result = tanh(hidden);
01291     else if(transfer_func=="sigmoid")
01292         result = sigmoid(hidden);
01293     else if(transfer_func=="softplus")
01294         result = softplus(hidden);
01295     else if(transfer_func=="exp")
01296         result = exp(hidden);
01297     else if(transfer_func=="softmax")
01298         result = softmax(hidden);
01299     else if (transfer_func == "log_softmax")
01300         result = log_softmax(hidden);
01301     else if(transfer_func=="hard_slope")
01302         result = unary_hard_slope(hidden,0,1);
01303     else if(transfer_func=="symm_hard_slope")
01304         result = unary_hard_slope(hidden,-1,1);
01305     else
01306         PLERROR("In DeepFeatureExtractorNNet::hiddenLayer - "
01307                 "Unknown value for transfer_func: %s",transfer_func.c_str());
01308     return result;
01309 }
01310 
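      // Same as above, but with an explicit bias Var and optionally transposed
      // weights, e.g. when the same weights are reused (tied) between the
      // encoding and reconstruction directions.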
01311 Var DeepFeatureExtractorNNet::hiddenLayer(const Var& input, 
01312                                           const Var& weights, const Var& bias, 
01313                                           bool transpose_weights,
01314                                           string transfer_func, 
01315                                           Var& before_transfer_function, 
01316                                           bool use_cubed_value) {
01317     Var hidden = bias_weight_affine_transform(input, weights, 
01318                                               bias,transpose_weights); 
01319     if(use_cubed_value)
01320         hidden = pow(hidden,3);    
01321     before_transfer_function = hidden;
01322     Var result;
01323     if(transfer_func=="linear")
01324         result = hidden;
01325     else if(transfer_func=="tanh")
01326         result = tanh(hidden);
01327     else if(transfer_func=="sigmoid")
01328         result = sigmoid(hidden);
01329     else if(transfer_func=="softplus")
01330         result = softplus(hidden);
01331     else if(transfer_func=="exp")
01332         result = exp(hidden);
01333     else if(transfer_func=="softmax")
01334         result = softmax(hidden);
01335     else if (transfer_func == "log_softmax")
01336         result = log_softmax(hidden);
01337     else if(transfer_func=="hard_slope")
01338         result = unary_hard_slope(hidden,0,1);
01339     else if(transfer_func=="symm_hard_slope")
01340         result = unary_hard_slope(hidden,-1,1);
01341     else
01342         PLERROR("In DeepFeatureExtractorNNet::hiddenLayer - "
01343                 "Unknown value for transfer_func: %s",transfer_func.c_str());
01344     return result;
01345 }
01346 
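      // Builds weight/bias decay penalties: one per weight matrix (the first
      // row gets bias_decay, or weight_decay when input and output weights are
      // shared, since the matrix then contains no bias row), one per explicit
      // bias vector, and one per reconstruction weight matrix.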
01347 void DeepFeatureExtractorNNet::buildPenalties() {
01348     // Prevents penalties from being added twice by consecutive builds
01349     penalties.resize(0);  
01350     if(weight_decay > 0 || bias_decay > 0)
01351     {
01352         for(int i=0; i<weights.length(); i++)
01353         {
01354             // If using same input and output weights,
01355             // then the weights do not include the bias!
01356             penalties.append(affine_transform_weight_penalty(
01357                                  weights[i], weight_decay, 
01358                                  use_same_input_and_output_weights ? 
01359                                  weight_decay : bias_decay, 
01360                                  penalty_type));
01361         }
01362         
01363         if(bias_decay > 0)
01364             for(int i=0; i<biases.length(); i++)
01365             {
01366                 penalties.append(affine_transform_weight_penalty(
01367                                      biases[i], bias_decay, 
01368                                      bias_decay, 
01369                                      penalty_type));
01370             }
01371 
01372 
01373         for(int i=0; i<reconstruction_weights.length(); i++)
01374         {
01375             penalties.append(affine_transform_weight_penalty(
01376                                  reconstruction_weights[i], 
01377                                  weight_decay, bias_decay, penalty_type));
01378         }                
01379     }
01380 }
01381 
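      // Fills a weight matrix according to 'initialization_method': "zero"
      // clears it; otherwise the scale is 1/fan_in if the method name contains
      // "linear", 1/sqrt(fan_in) otherwise, and values are drawn from a normal
      // distribution if it contains "normal", uniformly in [-scale, scale]
      // otherwise. If fill_first_row is true, the first (bias) row is set to
      // 'fill_with_this'.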
01382 void DeepFeatureExtractorNNet::fillWeights(const Var& weights, 
01383                                            bool fill_first_row, 
01384                                            real fill_with_this) {
01385     if (initialization_method == "zero") {
01386         weights->value->clear();
01387         return;
01388     }
01389     real delta;
01390     int is = weights.length();
01391     if (fill_first_row)
01392         is--; // Exclude the bias row from the fan-in used for scaling.
01393     if (initialization_method.find("linear") != string::npos)
01394         delta = 1.0 / real(is);
01395     else
01396         delta = 1.0 / sqrt(real(is));
01397     if (initialization_method.find("normal") != string::npos)
01398         random_gen->fill_random_normal(weights->value, 0, delta);
01399     else
01400         random_gen->fill_random_uniform(weights->value, -delta, delta);
01401     if (fill_first_row)
01402         weights->matValue(0).fill(fill_with_this);
01403 }
01404 
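      // Builds the computation Funcs: 'f' maps input to output, 'test_costf'
      // computes the output and test costs, 'output_and_target_to_cost'
      // computes the test costs from an (output, target) pair, and
      // 'to_feature_vector' maps the input to the learned feature vector.
      // The per-layer training costs and parameters are also saved, for the
      // fine-tuning phase, when autoassociator regularisation is used.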
01405 void DeepFeatureExtractorNNet::buildFuncs(const Var& the_input, 
01406                                           const Var& the_output, 
01407                                           const Var& the_target, 
01408                                           const Var& the_sampleweight) {
01409     invars.resize(0);
01410     VarArray outvars;
01411     VarArray testinvars;
01412     if (the_input)
01413     {
01414         invars.push_back(the_input);
01415         testinvars.push_back(the_input);
01416     }
01417     if(k_nearest_neighbors_reconstruction>=0 
01418        && nhidden_schedule_current_position < nhidden_schedule.length())
01419     {
01420         invars.push_back(unsupervised_target);
01421         testinvars.push_back(unsupervised_target);
01422         if(neighbor_indices)
01423         {
01424             invars.push_back(neighbor_indices);
01425             testinvars.push_back(neighbor_indices);
01426         }
01427     }
01428     if (the_output)
01429         outvars.push_back(the_output);
01430     if(the_target)
01431     {
01432         invars.push_back(the_target);
01433         testinvars.push_back(the_target);
01434         outvars.push_back(the_target);
01435     }
01436     if(the_sampleweight)
01437     {
01438         invars.push_back(the_sampleweight);
01439     }
01440     f = Func(the_input, the_output);
01441     test_costf = Func(testinvars, the_output&test_costs);
01442     test_costf->recomputeParents();
01443     output_and_target_to_cost = Func(outvars, test_costs); 
01444     output_and_target_to_cost->recomputeParents();
01445 
01446     // To be used later, in the fine-tuning phase
01447     if(autoassociator_regularisation_weight>0 
01448        && nhidden_schedule_current_position < nhidden_schedule.length())
01449     {
01450         autoassociator_training_costs[nhidden_schedule_current_position] = 
01451             training_cost;
01452         autoassociator_params[nhidden_schedule_current_position].resize(
01453             params_to_train.length());
01454         for(int i=0; i<params_to_train.length(); i++)
01455             autoassociator_params[nhidden_schedule_current_position][i] = 
01456                 params_to_train[i];
01457     }
01458     to_feature_vector = Func(input,feature_vector);
01459 }
01460 
01461 
01462 } // end of namespace PLearn
01463 
01464 
01465 /*
01466   Local Variables:
01467   mode:c++
01468   c-basic-offset:4
01469   c-file-style:"stroustrup"
01470   c-file-offsets:((innamespace . 0)(inline-open . 0))
01471   indent-tabs-mode:nil
01472   fill-column:79
01473   End:
01474 */
01475 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :