DistRepNNet.cc
00001 // -*- C++ -*-
00002 
00003 // DistRepNNet.cc
00004 // Copyright (c) 1998-2002 Pascal Vincent
00005 // Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
00006 // Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
00007 //
00008 // Redistribution and use in source and binary forms, with or without
00009 // modification, are permitted provided that the following conditions are met:
00010 // 
00011 //  1. Redistributions of source code must retain the above copyright
00012 //     notice, this list of conditions and the following disclaimer.
00013 // 
00014 //  2. Redistributions in binary form must reproduce the above copyright
00015 //     notice, this list of conditions and the following disclaimer in the
00016 //     documentation and/or other materials provided with the distribution.
00017 // 
00018 //  3. The name of the authors may not be used to endorse or promote
00019 //     products derived from this software without specific prior written
00020 //     permission.
00021 // 
00022 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00023 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00024 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00025 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00026 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00027 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00028 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00029 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00030 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00031 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00032 // 
00033 // This file is part of the PLearn library. For more information on the PLearn
00034 // library, go to the PLearn Web site at www.plearn.org
00035 
00036 
00037 /* *******************************************************      
00038  * $Id: DistRepNNet.cc 3994 2005-08-25 13:35:03Z chapados $
00039  ******************************************************* */
00040 
00041 
00042 #include <plearn/var/SourceVariable.h>
00043 #include <plearn/var/VarRowsVariable.h>
00044 //#include <plearn/var/PotentialsVariable.h>
00045 #include <plearn/var/IsMissingVariable.h>
00046 #include <plearn/var/ReIndexedTargetVariable.h>
00047 #include <plearn/var/LogSoftmaxVariable.h>
00048 #include <plearn/var/AffineTransformVariable.h>
00049 #include <plearn/var/AffineTransformWeightPenalty.h>
00050 #include <plearn/var/BinaryClassificationLossVariable.h>
00051 #include <plearn/var/ClassificationLossVariable.h>
00052 #include <plearn/var/ConcatColumnsVariable.h>
00054 #include <plearn/var/ConcatRowsVariable.h>
00055 #include <plearn/var/CrossEntropyVariable.h>
00056 #include <plearn/var/ExpVariable.h>
00057 #include <plearn/var/HeterogenuousAffineTransformVariable.h>
00058 #include <plearn/var/HeterogenuousAffineTransformWeightPenalty.h>
00059 #include <plearn/var/MarginPerceptronCostVariable.h>
00060 #include <plearn/var/MulticlassLossVariable.h>
00061 #include <plearn/var/NegCrossEntropySigmoidVariable.h>
00062 #include <plearn/var/InsertZerosVariable.h>
00063 #include <plearn/var/OneHotSquaredLoss.h>
00064 #include <plearn/var/SigmoidVariable.h>
00065 #include <plearn/var/SoftmaxVariable.h>
00066 #include <plearn/var/SoftplusVariable.h>
00067 #include <plearn/var/SubMatVariable.h>
00068 #include <plearn/var/SumVariable.h>
00069 #include <plearn/var/SumAbsVariable.h>
00070 #include <plearn/var/SumOfVariable.h>
00071 #include <plearn/var/SumSquareVariable.h>
00072 #include <plearn/var/TanhVariable.h>
00073 #include <plearn/var/TransposeVariable.h>
00074 #include <plearn/var/ProductVariable.h>
00075 #include <plearn/var/TransposeProductVariable.h>
00076 #include <plearn/var/UnaryHardSlopeVariable.h>
00077 #include <plearn/var/ArgmaxVariable.h>
00078 #include <plearn/var/Var_operators.h>
00079 #include <plearn/var/Var_utils.h>
00080 #include <plearn/var/FNetLayerVariable.h>
00081 //#include <plearn/display/DisplayUtils.h>
00082 
00083 #include <plearn/vmat/ConcatColumnsVMatrix.h>
00084 #include "DistRepNNet.h"
00085 #include <plearn/math/random.h>
00086 #include <plearn/vmat/SubVMatrix.h>
00087 
00088 namespace PLearn {
00089 using namespace std;
00090 
00091 PLEARN_IMPLEMENT_OBJECT(DistRepNNet, "Feedforward Neural Network that learns Distributed Representations for symbolic data", 
00092                         "Inspired from the NNet class, DistRepNNet is simply an extension that deals with\n"
00093                         "symbolic data by learning a Distributed Representation for each type of symbolic\n" 
00094                         "data. The possible targets are defined either by the VMatrix's target field\n"
00095                         "dictionary or by a Dictionary provided by the user. Extra VMatrices corresponding\n"
00096                         "to extra tasks can also be provided to make use of inductive transfer between\n"
00097                         "tasks. In this case, the VMatrices need to have a dictionary for their target field.\n");
00098 
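// A minimal usage sketch (illustrative only, not part of the class): assuming a
// VMat 'trainvm' whose symbolic input and target fields carry Dictionary objects,
// a DistRepNNet could be configured and trained roughly as follows; all option
// values below are arbitrary examples.
//
//     PP<DistRepNNet> learner = new DistRepNNet();
//     learner->nhidden = 100;                    // one hidden layer of 100 units
//     learner->dist_rep_dim = TVec<int>(1, 25);  // 25-dimensional input representations
//     learner->ntokens = 5;                      // the input is 5 tokens...
//     learner->nfeatures_per_token = 2;          // ...of 2 features each (5*2 == inputsize)
//     learner->cost_funcs = TVec<string>(1, "NLL");
//     learner->optimizer = new GradientOptimizer();
//     learner->setTrainingSet(trainvm);          // provides sizes and Dictionaries
//     learner->train();
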
00099 DistRepNNet::DistRepNNet() // DEFAULT VALUES FOR ALL OPTIONS
00100     :
00101 nhidden(0),
00102 nhidden2(0),
00103 nhidden_theta_predictor(0),
00104 nhidden_dist_rep_predictor(0),
00105 weight_decay(0),
00106 bias_decay(0),
00107 input_dist_rep_predictor_bias_decay(0),
00108 output_dist_rep_predictor_bias_decay(0),
00109 input_dist_rep_predictor_weight_decay(0),
00110 output_dist_rep_predictor_weight_decay(0),
00111 layer1_weight_decay(0),
00112 layer1_bias_decay(0),
00113 layer1_theta_predictor_weight_decay(0),
00114 layer1_theta_predictor_bias_decay(0),
00115 layer2_weight_decay(0),
00116 layer2_bias_decay(0),
00117 output_layer_weight_decay(0),
00118 output_layer_bias_decay(0),
00119 output_layer_theta_predictor_weight_decay(0),
00120 output_layer_theta_predictor_bias_decay(0),
00121 direct_in_to_out_weight_decay(0),
00122 direct_in_to_out_bias_decay(0),
00123 margin(1),
00124 fixed_output_weights(0),
00125 direct_in_to_out(0),
00126 penalty_type("L2_square"),
00127 output_transfer_func(""),
00128 hidden_transfer_func("tanh"),
00129 do_not_change_params(false),
00130 batch_size(1),
00131 initialization_method("uniform_linear"),
00132 ntokens(-1),
00133 nfeatures_per_token(-1),
00134 //consider_unseen_classes(0),
00135 use_dist_reps(1),
00136 use_output_weights_bases(0),
00137 use_extra_tasks_only_on_first_epoch(false),
00138 initialize_sparse_params_to_zero(false)
00139 {}
00140 
00141 DistRepNNet::~DistRepNNet()
00142 {
00143 }
00144 
00145 void DistRepNNet::declareOptions(OptionList& ol)
00146 {
00147     declareOption(ol, "nhidden", &DistRepNNet::nhidden, OptionBase::buildoption, 
00148                   "Number of hidden units in first hidden layer (0 means no hidden layer).\n");
00149 
00150     declareOption(ol, "nhidden2", &DistRepNNet::nhidden2, OptionBase::buildoption, 
00151                   "Number of hidden units in second hidden layer (0 means no hidden layer).\n");
00152 
00153     declareOption(ol, "weight_decay", &DistRepNNet::weight_decay, OptionBase::buildoption, 
00154                   "Global weight decay for all layers.\n");
00155 
00156     declareOption(ol, "bias_decay", &DistRepNNet::bias_decay, OptionBase::buildoption, 
00157                   "Global bias decay for all layers.\n");
00158 
00159     declareOption(ol, "layer1_weight_decay", &DistRepNNet::layer1_weight_decay, OptionBase::buildoption, 
00160                   "Additional weight decay for the first hidden layer.  Is added to weight_decay.\n");
00161 
00162     declareOption(ol, "layer1_bias_decay", &DistRepNNet::layer1_bias_decay, OptionBase::buildoption, 
00163                   "Additional bias decay for the first hidden layer.  Is added to bias_decay.\n");
00164 
00165     declareOption(ol, "layer2_weight_decay", &DistRepNNet::layer2_weight_decay, OptionBase::buildoption, 
00166                   "Additional weight decay for the second hidden layer.  Is added to weight_decay.\n");
00167 
00168     declareOption(ol, "layer2_bias_decay", &DistRepNNet::layer2_bias_decay, OptionBase::buildoption, 
00169                   "Additional bias decay for the second hidden layer.  Is added to bias_decay.\n");
00170 
00171     declareOption(ol, "layer1_theta_predictor_weight_decay", &DistRepNNet::layer1_theta_predictor_weight_decay, OptionBase::buildoption, 
00172                   "Additional weight decay for the first hidden layer of the theta-predictor.  Is added to weight_decay.\n");
00173 
00174     declareOption(ol, "layer1_theta_predictor_bias_decay", &DistRepNNet::layer1_theta_predictor_bias_decay, OptionBase::buildoption, 
00175                   "Additional bias decay for the first hidden layer of the theta-predictor.  Is added to bias_decay.\n");
00176 
00177     declareOption(ol, "output_layer_weight_decay", &DistRepNNet::output_layer_weight_decay, OptionBase::buildoption, 
00178                   "Additional weight decay for the output layer.  Is added to 'weight_decay'.\n");
00179 
00180     declareOption(ol, "output_layer_bias_decay", &DistRepNNet::output_layer_bias_decay, OptionBase::buildoption, 
00181                   "Additional bias decay for the output layer.  Is added to 'bias_decay'.\n");
00182 
00183     declareOption(ol, "output_layer_theta_predictor_weight_decay", &DistRepNNet::output_layer_theta_predictor_weight_decay, OptionBase::buildoption, 
00184                   "Additional weight decay for the output layer of the theta-predictor.  Is added to 'weight_decay'.\n");
00185 
00186     declareOption(ol, "output_layer_theta_predictor_bias_decay", &DistRepNNet::output_layer_theta_predictor_bias_decay, OptionBase::buildoption, 
00187                   "Additional bias decay for the output layer of the theta-predictor.  Is added to 'bias_decay'.\n");
00188 
00189     declareOption(ol, "output_dist_rep_predictor_weight_decay", &DistRepNNet::output_dist_rep_predictor_weight_decay, OptionBase::buildoption, 
00190                   "Additional weight decay for the weights going from the hidden layer of the distributed representation predictor.  Is added to 'weight_decay'.\n");
00191 
00192     declareOption(ol, "output_dist_rep_predictor_bias_decay", &DistRepNNet::output_dist_rep_predictor_bias_decay, OptionBase::buildoption, 
00193                   "Additional bias decay for the weights going from the hidden layer of the distributed representation predictor.  Is added to 'bias_decay'.\n");
00194 
00195     declareOption(ol, "input_dist_rep_predictor_weight_decay", &DistRepNNet::input_dist_rep_predictor_weight_decay, OptionBase::buildoption, 
00196                   "Additional weight decay for the weights going from the input layer of the distributed representation predictor.  Is added to 'weight_decay'.\n");
00197 
00198     declareOption(ol, "input_dist_rep_predictor_bias_decay", &DistRepNNet::input_dist_rep_predictor_bias_decay, OptionBase::buildoption, 
00199                   "Additional bias decay for the weights going from the input layer of the distributed representation predictor.  Is added to 'bias_decay'.\n");
00200 
00201     declareOption(ol, "direct_in_to_out_weight_decay", &DistRepNNet::direct_in_to_out_weight_decay, OptionBase::buildoption,
00202                   "Additional weight decay for the weights going from the input directly to the output layer.  Is added to 'weight_decay'.\n");
00203 
00204     declareOption(ol, "direct_in_to_out_bias_decay", &DistRepNNet::direct_in_to_out_bias_decay, OptionBase::buildoption,
00205                   "Additional bias decay for the weights going from the input directly to the output layer.  Is added to 'bias_decay'.\n");
00206 
00207 
00208     declareOption(ol, "penalty_type", &DistRepNNet::penalty_type,
00209                   OptionBase::buildoption,
00210                   "Penalty to use on the weights (for weight and bias decay).\n"
00211                   "Can be any of:\n"
00212                   "  - \"L1\": L1 norm,\n"
00213                   "  - \"L1_square\": square of the L1 norm,\n"
00214                   "  - \"L2_square\" (default): square of the L2 norm.\n");
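    // For reference, the penalty names above correspond (sketch only; the actual
    // computation is done by the AffineTransformWeightPenalty and
    // heterogenuous_affine_transform_weight_penalty variables used later in this
    // file) to the following functions of a weight vector w:
    //   - "L1":        decay * sum_i |w_i|
    //   - "L1_square": decay * (sum_i |w_i|)^2
    //   - "L2_square": decay * sum_i w_i^2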
00215 
00216     declareOption(ol, "fixed_output_weights", &DistRepNNet::fixed_output_weights, OptionBase::buildoption, 
00217                   "If true then the output weights are not learned. They are initialized to +1 or -1 randomly.\n");
00218 
00219     declareOption(ol, "direct_in_to_out", &DistRepNNet::direct_in_to_out, OptionBase::buildoption, 
00220                   "If true then direct input to output weights will be added (if nhidden > 0).\n");
00221 
00222     declareOption(ol, "output_transfer_func", &DistRepNNet::output_transfer_func, OptionBase::buildoption, 
00223                   "What transfer function to use for the output layer? One of: \n"
00224                   "  - \"tanh\" \n"
00225                   "  - \"sigmoid\" \n"
00226                   "  - \"exp\" \n"
00227                   "  - \"softplus\" \n"
00228                   "  - \"softmax\" \n"
00229                   "  - \"log_softmax\" \n"
00230                   "  - \"hard_slope\" \n"
00231                   "  - \"symm_hard_slope\" \n"
00232                   "An empty string or \"none\" means no output transfer function \n");
00233 
00234     declareOption(ol, "hidden_transfer_func", &DistRepNNet::hidden_transfer_func, OptionBase::buildoption, 
00235                   "What transfer function to use for hidden units? One of: \n"
00236                   "  - \"linear\" \n"
00237                   "  - \"tanh\" \n"
00238                   "  - \"sigmoid\" \n"
00239                   "  - \"exp\" \n"
00240                   "  - \"softplus\" \n"
00241                   "  - \"softmax\" \n"
00242                   "  - \"log_softmax\" \n"
00243                   "  - \"hard_slope\" \n"
00244                   "  - \"symm_hard_slope\" \n");
00245 
00246     declareOption(ol, "cost_funcs", &DistRepNNet::cost_funcs, OptionBase::buildoption, 
00247                   "A list of cost functions to use\n"
00248                   "in the form \"[ cf1; cf2; cf3; ... ]\" where each function is one of: \n"
00249                   "  - \"mse_onehot\" (for classification)\n"
00250                   "  - \"NLL\" (negative log likelihood -log(p[c]) for classification) \n"
00251                   "  - \"class_error\" (classification error) \n"
00252                   "  - \"margin_perceptron_cost\" (a hard version of the cross_entropy, uses the 'margin' option)\n"
00253                   "The FIRST function of the list will be used as \n"
00254                   "the objective function to optimize \n"
00255                   "(possibly with an added weight decay penalty) \n");
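    // Quick reference (sketch, matching buildCosts() below): with a softmax output,
    // the "NLL" cost of an example with target class c is
    //     NLL = -log(output[c])
    // (or simply -output[c] when output_transfer_func is "log_softmax"), and
    // "class_error" is 1 if argmax(output) != c, 0 otherwise.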
00256   
00257     declareOption(ol, "margin", &DistRepNNet::margin, OptionBase::buildoption, 
00258                   "Margin requirement, used only with the margin_perceptron_cost cost function.\n"
00259                   "It should be positive, and larger values regularize more.\n");
00260 
00261     declareOption(ol, "do_not_change_params", &DistRepNNet::do_not_change_params, OptionBase::buildoption, 
00262                   "If set to 1, the weights won't be loaded or initialized at build time.");
00263 
00264     declareOption(ol, "optimizer", &DistRepNNet::optimizer, OptionBase::buildoption, 
00265                   "Specify the optimizer to use\n");
00266 
00267     declareOption(ol, "batch_size", &DistRepNNet::batch_size, OptionBase::buildoption, 
00268                   "How many samples to use to estimate the average gradient before updating the weights\n"
00269                   "0 is equivalent to specifying training_set->length() \n");
00270 
00271     declareOption(ol, "dist_rep_dim", &DistRepNNet::dist_rep_dim, OptionBase::buildoption, 
00272                   "Dimensionality (number of components) of distributed representations.\n"
00273                   "The first element is the dimensionality of the input distributed representations\n"
00274                   "and the last one is the dimensionality of the target distributed representations.\n"
00275 //                  "Those values are taken one by one, as the Dictionary objects are extracted.\n"
00276 //                  "When nnet_architecture == \"dist_rep_predictor\", the first element of dist_rep_dim\n"
00277 //                  "indicates the dimensionality to the predicted distributed representation.\n"
00278         );
00279 
00280 
00281     declareOption(ol, "initialization_method", &DistRepNNet::initialization_method, OptionBase::buildoption, 
00282                   "The method used to initialize the weights:\n"
00283                   " - \"normal_linear\"  = a normal law with variance 1/n_inputs\n"
00284                   " - \"normal_sqrt\"    = a normal law with variance 1/sqrt(n_inputs)\n"
00285                   " - \"uniform_linear\" = a uniform law in [-1/n_inputs, 1/n_inputs]\n"
00286                   " - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(n_inputs), 1/sqrt(n_inputs)]\n"
00287                   " - \"zero\"           = all weights are set to 0\n");
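    // Sketch of what "uniform_linear" amounts to for a weight matrix 'w' fed by
    // 'n_inputs' inputs (the actual work is done in initializeParams(), which is
    // not part of this excerpt):
    //     real delta = 1.0 / n_inputs;
    //     fill_random_uniform(w->matValue, -delta, delta); // from plearn/math/random.h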
00288 
00289     declareOption(ol, "use_dist_reps", &DistRepNNet::use_dist_reps, OptionBase::buildoption, 
00290                   "Indication that distributed representations should be used");
00291 
00292     declareOption(ol, "use_output_weights_bases", &DistRepNNet::use_output_weights_bases, OptionBase::buildoption, 
00293                   "Indication that bases for output weights should be used");
00294 
00295     declareOption(ol, "use_extra_tasks_only_on_first_epoch", &DistRepNNet::use_extra_tasks_only_on_first_epoch, OptionBase::buildoption, 
00296                   "Indication that the extra tasks will only be used at the first epoch");
00297 
00298     declareOption(ol, "initialize_sparse_params_to_zero", &DistRepNNet::initialize_sparse_params_to_zero, OptionBase::buildoption, 
00299                   "Indication that the parameters on the sparse input should be initialized to zero");
00300 
00301     /*
00302     declareOption(ol, "nnet_architecture", &DistRepNNet::nnet_architecture, OptionBase::buildoption, 
00303                   "Architecture of the neural network:\n"
00304                   " - \"standard\"\n"
00305                   //" - \"csMTL\" (context-sensitive Multiple Task Learning, at NIPS 2005 Inductive Transfer Workshop)\n"
00306                   " - \"theta_predictor\" (standard NNet with output weights being PREDICTED) \n"
00307                   " - \"dist_rep_predictor\" (standard NNet with distributed representation being PREDICTED) \n"
00308                   " - \"linear\" (linear classifier that doesn't learn distributed representations) \n"
00309         );
00310     */
00311 
00312     declareOption(ol, "ntokens", &DistRepNNet::ntokens, OptionBase::buildoption, 
00313                   "Number of tokens for which to predict a distributed representation.\n");
00314 
00315     declareOption(ol, "nfeatures_per_token", &DistRepNNet::nfeatures_per_token, OptionBase::buildoption, 
00316                   "Number of features per token.\n");    
00317 
00318     declareOption(ol, "nfeatures_for_each_token", &DistRepNNet::nfeatures_for_each_token, OptionBase::buildoption, 
00319                   "Number of features for each token (nfeatures_per_token is used if nfeatures_for_each_token.length()==0).\n");    
00320 
00321     declareOption(ol, "nhidden_dist_rep_predictor", &DistRepNNet::nhidden_dist_rep_predictor, OptionBase::buildoption, 
00322                   "Number of hidden units of the neural network predictor for the distributed representation.\n");
00323 
00324     declareOption(ol, "target_dictionary", &DistRepNNet::target_dictionary, OptionBase::buildoption, 
00325                   "User specified Dictionary for the target field. If null, then it is extracted from the training set VMatrix.\n");
00326 
00327     declareOption(ol, "target_dist_rep", &DistRepNNet::target_dist_rep, OptionBase::buildoption, 
00328                   "User specified distributed representation for the target field. If null, then it is learned from the training set VMatrix.\n");
00329 
00330     declareOption(ol, "paramsvalues", &DistRepNNet::paramsvalues, OptionBase::learntoption, 
00331                   "The learned parameter vector\n");
00332 
00333     declareOption(ol, "nhidden_theta_predictor", &DistRepNNet::nhidden_theta_predictor, OptionBase::buildoption, 
00334                   "Number of hidden units of the neural network predictor for the hidden to output weights.\n");
00335 
00336     declareOption(ol, "extra_tasks", &DistRepNNet::extra_tasks, OptionBase::buildoption, 
00337                   "Extra tasks' datasets to train on.\n");
00338 
00339     declareOption(ol, "nhidden_extra_tasks", &DistRepNNet::nhidden_extra_tasks, OptionBase::buildoption, 
00340                   "Number of hidden units in first hidden layer for extra tasks (0 means no hidden layer).\n");
00341 
00342     declareOption(ol, "nhidden2_extra_tasks", &DistRepNNet::nhidden2_extra_tasks, OptionBase::buildoption, 
00343                   "Number of hidden units in second hidden layer for extra tasks (0 means no hidden layer).\n");
00344 
00345     declareOption(ol, "optimizer_extra_tasks", &DistRepNNet::optimizer_extra_tasks, OptionBase::buildoption, 
00346                   "Specify the optimizer to use for extra tasks.\n");
00347 
00348     declareOption(ol, "ntokens_extra_tasks", &DistRepNNet::ntokens_extra_tasks, OptionBase::buildoption, 
00349                   "Number of tokens for which to predict a distributed representation, for the extra tasks.\n");
00350 
00351 //    declareOption(ol, "consider_unseen_classes", &DistRepNNet::consider_unseen_classes, OptionBase::buildoption, 
00352 //                  "Indication that the test classes may be unseen in the training set.\n");
00353 
00354     declareOption(ol, "train_set", &DistRepNNet::train_set, OptionBase::learntoption, 
00355                   "VMatrix used for training, which also provides information about the data (e.g. Dictionary objects for the different fields).\n");
00356 
00357 
00358     inherited::declareOptions(ol);
00359 
00360 }
00361 
00363 // build //
00365 void DistRepNNet::build()
00366 {
00367     inherited::build();
00368     build_();
00369 }
00370 
00371 Var DistRepNNet::buildSparseAffineTransform(VarArray weights, Var input, TVec<int> input_to_dict_index, int begin)
00372 {   
00373     TVec<bool> input_is_discrete(weights->length()-1);
00374     Vec missing_replace(weights->length()-1);
00375     for(int j=0; j<weights->length()-1; j++)
00376     {
00377         if(input_to_dict_index[begin+j] < 0)
00378         {
00379             input_is_discrete[j] = false;
00380             missing_replace[j] = 0;            
00381         }
00382         else
00383         {
00384             input_is_discrete[j] = true;
00385             missing_replace[j] = dictionaries[input_to_dict_index[begin+j]]->getId(dictionaries[input_to_dict_index[begin+j]]->oov_symbol);            
00386         }
00387     }
00388     if(weights.length()-1 == input->length())
00389         return heterogenuous_affine_transform(isMissing(input,true, true, missing_replace), weights, input_is_discrete);
00390     else
00391         return heterogenuous_affine_transform(isMissing(subMat(input,begin,0,weights.length()-1,1),true, true, missing_replace), weights, input_is_discrete);
00392 }
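// Conceptually (sketch only, based on how the 'weights' arrays are built by the
// callers; the exact convention lives in HeterogenuousAffineTransformVariable),
// the returned Var computes, for an input vector x:
//
//     out = bias + sum_j contribution_j
//     contribution_j = x[j] * weights[j]        if component j is real-valued
//     contribution_j = row x[j] of weights[j]   if component j is symbolic,
//                      with missing symbolic values first replaced by the
//                      Dictionary's OOV id (as set up in the loop above)
//
// where 'bias' is the last Var of the 'weights' array.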
00393 
00394 Var DistRepNNet::buildSparseAffineTransformWeightPenalty(VarArray weights, Var input, TVec<int> input_to_dict_index, int begin, real weight_decay, real bias_decay, string penalty_type)
00395 {   
00396     TVec<bool> input_is_discrete(weights.length()-1);
00397     Vec missing_replace(weights.length()-1);
00398     for(int j=0; j<weights->length()-1; j++)
00399     {
00400         if(input_to_dict_index[begin+j] < 0)
00401         {
00402             input_is_discrete[j] = false;
00403             missing_replace[j] = 0;            
00404         }
00405         else
00406         {
00407             input_is_discrete[j] = true;
00408             missing_replace[j] = dictionaries[input_to_dict_index[begin+j]]->getId(dictionaries[input_to_dict_index[begin+j]]->oov_symbol);
00409         }
00410     }
00411     
00412     if(weights.length()-1 == input->length())
00413         return heterogenuous_affine_transform_weight_penalty(isMissing(input,true, true, missing_replace), weights, input_is_discrete, weight_decay, bias_decay, penalty_type);
00414     else
00415         return heterogenuous_affine_transform_weight_penalty(isMissing(subMat(input,begin,0,weights.length()-1,1),true, true, missing_replace), weights, input_is_discrete, weight_decay, bias_decay, penalty_type);
00416 }
00417 
00418 void DistRepNNet::buildVarGraph(int task_index)
00419 {
00420     VMat task_set;
00421     if(task_index < 0)
00422         task_set = train_set;
00423     else
00424         task_set = extra_tasks[task_index];    
00425 
00426     if(task_set->targetsize() != 1)
00427         PLERROR("In DistRepNNet::buildVarGraph(): task_set->targetsize() must be 1, not %d", task_set->targetsize());
00428 
00429     // Initialize the input.
00430     // This is where we construct the distributed representation
00431     // mappings (matrices).
00432     // The input is separated into two parts: one which corresponds
00433     // to symbolic data (and uses distributed representations) and
00434     // one which corresponds to real-valued data.
00435     // Finally, in order to figure out how many representation
00436     // mappings have to be constructed (since several input elements
00437     // might share the same Dictionary), we use the pointer
00438     // values of the Dictionaries.
00439 
00440     int n_dist_rep_input = 0;
00441     input_to_dict_index.resize(inputsize_);
00442     input_to_dict_index.fill(-1);
00443     target_dict_index = -1;
00444 
00445     //if(direct_in_to_out && nnet_architecture == "csMTL")
00446     //    PLERROR("In DistRepNNet::buildVarGraph(): direct_in_to_out cannot be used with \"csMTL\" architecture");
00447 
00448     // Associate input components with their corresponding
00449     // Dictionary and distributed representation
00450     for(int i=0; i<task_set->inputsize(); i++)
00451     {
00452         PP<Dictionary> dict = task_set->getDictionary(i);
00453 
00454         // Check if component has Dictionary
00455         if(dict)
00456         {
00457             // Find if Dictionary has already been added
00458             int f = dictionaries.find(dict);               
00459             if(f<0)
00460             {
00461                 dictionaries.push_back(dict);
00462                 input_to_dict_index[i] = dictionaries.size()-1;
00463             }
00464             else input_to_dict_index[i] = f;
00465             n_dist_rep_input++;
00466         }
00467     }
00468 
00469     // Add target Dictionary
00470     {
00471         PP<Dictionary> dict;
00472         if(target_dictionary && task_index < 0) dict = target_dictionary; 
00473         else dict = task_set->getDictionary(task_set->inputsize());
00474 
00475         // Check if component has Dictionary
00476         if(!dict) PLERROR("In DistRepNNet::buildVarGraph(): target component of task set has no Dictionary");
00477         // Find if Dictionary has already been added
00478         int f = dictionaries.find(dict);               
00479         if(f<0)
00480         {
00481             dictionaries.push_back(dict);
00482             target_dict_index = dictionaries.size()-1;
00483         }
00484         else
00485             target_dict_index = f;
00486     }
00487         
00488 //    if(dist_rep_dim.length() != dist_reps.length())
00489 //        PLWARNING("In DistRepNNet::buildVarGraph(): number of distributed representation sets (%d) and dimensionaly specification (dist_rep_dim.length()=%d) isn't the same", dist_reps.length(), dist_rep_dim.length());
00490         
00491     input = Var(task_set->inputsize(), "input");
00492     
00493     //if(nnet_architecture == "dist_rep_predictor" || nnet_architecture == "linear")
00494     //if(!use_dist_reps)
00495     //{
00496     //    ntokens = 1;
00497     //    nfeatures_per_token = task_set->inputsize();
00498     //    nhidden_dist_rep_predictor = -1;
00499     //    dim = dictionaries[target_dict_index]->size() + (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1);
00500     //}
00501 
00502     if(use_dist_reps)
00503     {
00504         int dim = dist_rep_dim[0];
00505         int this_ntokens;
00506         if(task_index < 0)
00507         {
00508             if(nfeatures_for_each_token.length() == 0)
00509             {
00510                 if(ntokens <= 0) PLERROR("In DistRepNNet::buildVarGraph(): ntokens should be > 0");
00511                 if(nfeatures_per_token <= 0) PLERROR("In DistRepNNet::buildVarGraph(): nfeatures_per_token should be > 0");
00512                 if(ntokens * nfeatures_per_token != task_set->inputsize()) PLERROR("In DistRepNNet::buildVarGraph(): ntokens * nfeatures_per_token != task_set->inputsize()");
00513             }
00514             else
00515             {
00516                 int sum_feat = 0;
00517                 for(int f=0; f<nfeatures_for_each_token.length(); f++)
00518                 {
00519                     if(nfeatures_for_each_token[f] <= 0) PLERROR("In DistRepNNet::buildVarGraph(): nfeatures_for_each_token[%d] should be > 0", f);
00520                     sum_feat += nfeatures_for_each_token[f];        
00521                 }
00522                 if(sum_feat != inputsize())
00523                     PLERROR("In DistRepNNet::buildVarGraph(): sum of nfeatures_for_each_token should be equal to inputsize");
00524                 if(nfeatures_for_each_token.length() != ntokens)
00525                     PLERROR("In DistRepNNet::buildVarGraph(): nfeatures_for_each_token should be of size ntokens=%d", ntokens);
00526             }
00527             this_ntokens = ntokens;
00528         }
00529         else
00530         {
00531             if(nfeatures_for_each_token.length() != 0)
00532                 PLERROR("In DistRepNNet::buildVarGraph(): usage of nfeatures_for_each_token with extra tasks is not supported yet");
00533             if(task_index >= ntokens_extra_tasks.length()) PLERROR("In DistRepNNet::buildVarGraph(): ntokens not defined for task %d", task_index);
00534             if(ntokens_extra_tasks[task_index] <= 0) PLERROR("In DistRepNNet::buildVarGraph(): ntokens[%d] should be > 0", task_index);
00535             if(nfeatures_per_token <= 0) PLERROR("In DistRepNNet::buildVarGraph(): nfeatures_per_token should be > 0");
00536             if(ntokens_extra_tasks[task_index] * nfeatures_per_token != task_set->inputsize()) PLERROR("In DistRepNNet::buildVarGraph(): ntokens_extra_task[%d] * nfeatures_per_token != task_set->inputsize()",task_index);
00537             this_ntokens = ntokens_extra_tasks[task_index];
00538         }
00539                 
00540         //activated_weights.resize(0);
00541         VarArray dist_reps(this_ntokens);
00542         VarArray dist_rep_hids(this_ntokens);
00543 
00544         if(nfeatures_for_each_token.length() != 0)
00545         {
00546             if(winputdistrep.length() == 0)
00547             {
00549                 winputdistrep.resize(sum(nfeatures_for_each_token)+this_ntokens);
00550                 int sum = 0;
00551                 int sum_dict = 0;
00552                 if(nhidden_dist_rep_predictor>0)
00553                     PLERROR("In DistRepNNet::buildVarGraph(): nhidden_dist_rep_predictor>0 is not supported with nfeatures_for_each_token");                    
00554                 for(int t=0; t<this_ntokens; t++)
00555                 {
00556                     if(nhidden_dist_rep_predictor > 0) winputdistrep[sum+nfeatures_for_each_token[t]] = Var(1,nhidden_dist_rep_predictor);
00557                     else winputdistrep[sum+nfeatures_for_each_token[t]] = Var(1,dim);
00558                     for(int j=0; j<nfeatures_for_each_token[t]; j++)
00559                     {
00560                         if(input_to_dict_index[sum_dict+j] < 0)
00561                             if(nhidden_dist_rep_predictor > 0) winputdistrep[sum+j] = Var(1,nhidden_dist_rep_predictor);
00562                             else winputdistrep[sum+j] = Var(1,dim);
00563                         else                            
00564                             if(nhidden_dist_rep_predictor > 0) winputdistrep[sum+j] = Var(dictionaries[input_to_dict_index[sum_dict+j]]->size()+1,nhidden_dist_rep_predictor);
00565                             else winputdistrep[sum+j] = Var(dictionaries[input_to_dict_index[sum_dict+j]]->size()+1,dim);
00566                         if(nhidden_dist_rep_predictor > 0) woutdistrep = Var(nhidden_dist_rep_predictor+1,dim);
00567                     }
00568                     sum += nfeatures_for_each_token[t]+1;
00569                     sum_dict += nfeatures_for_each_token[t];
00570                 }
00571                 params.append(winputdistrep);
00572                 partial_update_vars.append(winputdistrep);
00573                 if(nhidden_dist_rep_predictor > 0) params.append(woutdistrep); 
00574             }
00575 
00576             // Building var graph from input to distributed representations
00577             int sum = 0;
00578             int sum_dict = 0;
00579             for(int i=0; i<this_ntokens; i++)
00580             {
00581                 //if(nhidden_dist_rep_predictor > 0) dist_rep_hids[i] = buildSparseAffineTransform(winputdistrep, input, input_to_dict_index, i*nfeatures_per_token);
00582                 //else 
00583                 dist_reps[i] =  buildSparseAffineTransform(winputdistrep.subVarArray(sum,nfeatures_for_each_token[i]+1), input, input_to_dict_index, sum_dict);
00584                 
00585                 //if(nhidden_dist_rep_predictor > 0) 
00586                 //{
00587                 //    dist_rep_hids[i] = add_transfer_func(dist_rep_hids[i]);
00588                 //    dist_reps.append(affine_transform(dist_rep_hids[i],woutdistrep));
00589                 //}
00590                 sum += nfeatures_for_each_token[i]+1;
00591                 sum_dict += nfeatures_for_each_token[i];
00592             }
00593 
00594         }
00595         else
00596         {
00597             if(winputdistrep.length() == 0)
00598             {
00599                 winputdistrep.resize(nfeatures_per_token+1);
00600                 if(nhidden_dist_rep_predictor > 0) winputdistrep[nfeatures_per_token] = Var(1,nhidden_dist_rep_predictor);
00601                 else winputdistrep[nfeatures_per_token] = Var(1,dim);
00602                 for(int j=0; j<nfeatures_per_token; j++)
00603                 {
00604                     if(input_to_dict_index[j] < 0)
00605                         if(nhidden_dist_rep_predictor > 0) winputdistrep[j] = Var(1,nhidden_dist_rep_predictor);
00606                         else winputdistrep[j] = Var(1,dim);
00607                     else                            
00608                         if(nhidden_dist_rep_predictor > 0) winputdistrep[j] = Var(dictionaries[input_to_dict_index[j]]->size()+1,nhidden_dist_rep_predictor);
00609                         else winputdistrep[j] = Var(dictionaries[input_to_dict_index[j]]->size()+1,dim);
00610                     if(nhidden_dist_rep_predictor > 0) woutdistrep = Var(nhidden_dist_rep_predictor+1,dim);
00611                 }
00612                 params.append(winputdistrep);
00613                 partial_update_vars.append(winputdistrep);
00614                 if(nhidden_dist_rep_predictor > 0) params.append(woutdistrep);
00615             }
00616 
00617             // Building var graph from input to distributed representations
00618             for(int i=0; i<this_ntokens; i++)
00619             {
00620                 if(nhidden_dist_rep_predictor > 0) dist_rep_hids[i] = buildSparseAffineTransform(winputdistrep, input, input_to_dict_index, i*nfeatures_per_token);
00621                 else dist_reps[i] =  buildSparseAffineTransform(winputdistrep, input, input_to_dict_index, i*nfeatures_per_token);
00622                 
00623                 if(nhidden_dist_rep_predictor > 0) 
00624                 {
00625                     dist_rep_hids[i] = add_transfer_func(dist_rep_hids[i]);
00626                     dist_reps.append(affine_transform(dist_rep_hids[i],woutdistrep));
00627                 }
00628             }
00629             
00630         }
00631 
00632         if(task_index < 0)
00633         {
00634             // To construct the Func...
00635             if(nfeatures_for_each_token.length() != 0)
00636                 token_features = Var(sum(nfeatures_for_each_token));
00637             else
00638                 token_features = Var(nfeatures_per_token);
00639             //VarArray aw;
00640 
00641             if(nfeatures_for_each_token.length() != 0)
00642             {
00643                 int sum = 0;
00644                 int sum_dict = 0;
00645                 VarArray dist_reps(this_ntokens);
00646                 for(int i=0; i<this_ntokens; i++)
00647                 {
00648                     //if(nhidden_dist_rep_predictor > 0) dist_rep_hids[i] = buildSparseAffineTransform(winputdistrep, input, input_to_dict_index, i*nfeatures_per_token);
00649                     //else 
00650                     dist_reps[i] =  buildSparseAffineTransform(winputdistrep.subVarArray(sum,nfeatures_for_each_token[i]+1), token_features, input_to_dict_index, sum_dict);
00651                     
00652                     //if(nhidden_dist_rep_predictor > 0) 
00653                     //{
00654                     //    dist_rep_hids[i] = add_transfer_func(dist_rep_hids[i]);
00655                     //    dist_reps.append(affine_transform(dist_rep_hids[i],woutdistrep));
00656                     //}
00657                     sum += nfeatures_for_each_token[i]+1;
00658                     sum_dict += nfeatures_for_each_token[i];
00659                 }
00660                 dist_rep = vconcat(dist_reps);
00661             }
00662             else
00663             {
00664                 Var dist_rep_hid;
00665                 if(nhidden_dist_rep_predictor > 0) dist_rep_hid = buildSparseAffineTransform(winputdistrep, token_features, input_to_dict_index, 0);
00666                 else dist_rep =  buildSparseAffineTransform(winputdistrep, token_features, input_to_dict_index, 0);
00667                 
00668                 if(nhidden_dist_rep_predictor > 0) 
00669                 {
00670                     dist_rep_hid = add_transfer_func(dist_rep_hid);                
00671                     dist_rep = affine_transform(dist_rep_hid,woutdistrep);
00672                 }
00673             }
00674         }
00675         
00676         dp_input = vconcat(dist_reps);
00677     }
00678 
00679     if(fixed_output_weights && !use_dist_reps && (task_index < 0 && nhidden <= 0 || task_index>=0 && nhidden_extra_tasks[task_index] <= 0))
00680         PLERROR("In DistRepNNet::buildVarGraph(): fixed output weights are not implemented for sparse input with no hidden layers");
00681 
00682     // Build main network graph.
00683     buildOutputFromInput(task_index);
00684 
00685     target = Var(targetsize_);
00686     TVec<int> target_cols(1);
00687     target_cols[0] = task_set->inputsize();
00688 
00689     Var reind_target;
00690     if(target_dictionary && task_index < 0)
00691         reind_target = reindexed_target(target,input,target_dictionary,target_cols);
00692     else
00693         reind_target = reindexed_target(target,input,task_set,target_cols);
00694     //reind_target = target;
00695 
00696     if(weightsize_>0)
00697     {
00698         if (weightsize_!=1)
00699             PLERROR("In DistRepNNet::buildVarGraph(): expected weightsize to be 1 or 0 (or unspecified = -1, meaning 0), got %d",weightsize_);
00700         sampleweight = Var(1, "weight");
00701     }
00702 
00703     string pt = lowerstring( penalty_type );
00704     if( pt == "l1" )
00705         penalty_type = "L1";
00706     else if( pt == "l1_square" || pt == "l1 square" || pt == "l1square" )
00707         penalty_type = "L1_square";
00708     else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
00709         penalty_type = "L2_square";
00710     else if( pt == "l2" )
00711     {
00712         PLWARNING("L2 penalty not supported, assuming you want L2 square");
00713         penalty_type = "L2_square";
00714     }
00715     else
00716         PLERROR("penalty_type \"%s\" not supported", penalty_type.c_str());
00717 
00718     buildCosts(output, reind_target, task_index);
00719 
00720     // Build functions.
00721     if(task_index < 0)
00722         buildFuncs(invars);
00723     else
00724         buildFuncs(invars_extra_tasks[task_index]);
00725 
00726 //    if(consider_unseen_classes) cost_paramf.resize(getTrainCostNames().length());
00727     
00728 }
00729 
00731 // build_ //
00733 void DistRepNNet::build_()
00734 {
00735     /*
00736      * Create Topology Var Graph
00737      */
00738 
00739     // Don't do anything if we don't have a train_set
00740     // It's the only one who knows the inputsize, targetsize and weightsize,
00741     // and it contains the Dictionaries...
00742 
00743     if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
00744     {
00745         if(targetsize_ != 1)
00746             PLERROR("In DistRepNNet::build_(): targetsize_ must be 1, not %d",targetsize_);
00747         if(fixed_output_weights && use_output_weights_bases)
00748             PLERROR("In DistRepNNet::build_(): output weights cannot be fixed (i.e. fixed_output_weights=1) and predicted (i.e. use_output_weights_bases=1)");
00749         if(direct_in_to_out && use_output_weights_bases)
00750             PLERROR("In DistRepNNet::build_(): direct input to output weights cannot be used with output weights bases");        
00751         if(!use_output_weights_bases && !use_dist_reps 
00752            && extra_tasks.length() != 0)
00753             PLERROR("In DistRepNNet::build_(): it is useless to have extra tasks and not use distributed\n"
00754                 "representations or output weights bases.");
00755 
00756         dictionaries.resize(0);
00757         partial_update_vars.resize(0);
00758         //partial_update_vars_extra_tasks.resize(extra_tasks.length());
00759         //for(int t=0; t<partial_update_vars_extra_tasks.length(); t++)
00760         //    partial_update_vars_extra_tasks[t].resize(0);
00761         params.resize(0);
00762         training_cost_extra_tasks.resize(0);        
00763         invars_extra_tasks.resize(extra_tasks.length());
00764 
00765         // Reset shared parameters
00766         winputdistrep.resize(0);
00767         woutdistrep = (Variable*) NULL;
00768         w1theta = (Variable*) NULL;
00769         wouttheta = (Variable*) NULL;
00770 
00771         for(int t=0; t<extra_tasks.length(); t++)
00772         {
00773             // Reset parameters variable
00774             w1 = (Variable*) NULL;
00775             w2 = (Variable*) NULL;
00776             wout = (Variable*) NULL;
00777             direct_wout = (Variable*) NULL;
00778             outbias = (Variable*) NULL;        
00779             winputsparse.resize(0);
00780             winputsparse_weight_decay = 0;
00781             winputsparse_bias_decay = 0;
00782 
00783             buildVarGraph(t);
00784             initializeParams(true,t);
00785         }
00786 
00787         // Reset parameters variable
00788         w1 = (Variable*) NULL;
00789         w2 = (Variable*) NULL;
00790         wout = (Variable*) NULL;
00791         direct_wout = (Variable*) NULL;
00792         outbias = (Variable*) NULL;
00793         winputsparse.resize(0);
00794         winputsparse_weight_decay = 0;
00795         winputsparse_bias_decay = 0;
00796 
00797         buildVarGraph(-1);
00798         initializeParams();                        
00799 
00800         // Shared values hack...
00801         if (!do_not_change_params) {
00802             if(paramsvalues.length() == params.nelems())
00803                 params << paramsvalues;
00804             else
00805             {
00806                 paramsvalues.resize(params.nelems());
00807                 if(optimizer)
00808                     optimizer->reset();
00809                 for(int t=0; t<optimizer_extra_tasks.length(); t++)
00810                     optimizer_extra_tasks[t]->reset();
00811             }
00812             params.makeSharedValue(paramsvalues);
00813         }
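        // Note on the "shared values hack" above (descriptive only):
        // makeSharedValue() re-points the storage of every Var in 'params' into
        // the single flat vector 'paramsvalues', so that saving or loading the
        // learnt option 'paramsvalues' saves or loads all network weights at once.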
00814         
00815         output_comp.resize(1);
00816         options.resize(0);
00817         //output_comp.resize(outputsize());
00818     }
00819 }
00820 
00822 // buildCosts //
00824 void DistRepNNet::buildCosts(const Var& the_output, const Var& the_target, int task_index) {
00825     int ncosts = cost_funcs.size();  
00826     if(ncosts<=0)
00827         PLERROR("In DistRepNNet::buildCosts - Empty cost_funcs : must at least specify the cost function to optimize!");
00828     costs.resize(ncosts);
00829 
00830     for(int k=0; k<ncosts; k++)
00831     {
00832         // create costfuncs and apply individual weights if weightpart > 1
00833         if(cost_funcs[k]=="mse_onehot")
00834             costs[k] = onehot_squared_loss(the_output, the_target);
00835         else if(cost_funcs[k]=="NLL") 
00836         {
00837             if (the_output->size() == 1) {
00838                 // Assume sigmoid output here!
00839                 costs[k] = cross_entropy(the_output, the_target);
00840             } else {
00841                 if (output_transfer_func == "log_softmax")
00842                     costs[k] = -the_output[the_target];
00843                 else
00844                     costs[k] = neg_log_pi(the_output, the_target);
00845             }
00846         } 
00847         else if(cost_funcs[k]=="class_error")
00848             costs[k] = classification_loss(the_output, the_target);
00849         else if (cost_funcs[k]=="margin_perceptron_cost")
00850             costs[k] = margin_perceptron_cost(the_output,the_target,margin);
00851         else  // Assume we got a Variable name and its options
00852         {
00853             costs[k]= dynamic_cast<Variable*>(newObject(cost_funcs[k]));
00854             if(costs[k].isNull())
00855                 PLERROR("In DistRepNNet::buildCosts(): unknown cost_func option: %s",cost_funcs[k].c_str());
00856             costs[k]->setParents(the_output & the_target);
00857             costs[k]->build();
00858         }
00859     }
00860 
00861 
00862     /*
00863      * weight and bias decay penalty
00864      */
00865 
00866     // create penalties
00867     
00868     int this_ntokens;
00869     if(task_index < 0)
00870         this_ntokens = ntokens;
00871     else
00872         this_ntokens = ntokens_extra_tasks[task_index];
00873 
00874     buildPenalties(this_ntokens);
00875     test_costs = hconcat(costs);
00876 
00877     // Apply penalty to cost.
00878     // If there is no penalty, we still add costs[0] as the first cost, in
00879     // order to keep the same number of costs as if there was a penalty.
00880     if(penalties.size() != 0) {
00881         if (weightsize_>0)
00882             // only multiply by sampleweight if there are weights
00883             training_cost = hconcat(sampleweight*sum(hconcat(costs[0] & penalties))
00884                                     & (test_costs*sampleweight));
00885         else {
00886             training_cost = hconcat(sum(hconcat(costs[0] & penalties)) & test_costs);
00887         }
00888     } 
00889     else {
00890         if(weightsize_>0) {
00891             // only multiply by sampleweight if there are weights
00892             training_cost = hconcat(costs[0]*sampleweight & test_costs*sampleweight);
00893         } else {
00894             training_cost = hconcat(costs[0] & test_costs);
00895         }
00896     }
00897 
00898     if(task_index >= 0) training_cost_extra_tasks.push_back(training_cost);
00899 
00900     training_cost->setName("training_cost");
00901     test_costs->setName("test_costs");
00902     the_output->setName("output");
00903 }
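// Resulting layout (sketch): with costs = [c0, c1, ..., ck] and penalties
// p1, ..., pm, the Vars built above are
//
//     test_costs    = [ c0, c1, ..., ck ]
//     training_cost = [ c0 + p1 + ... + pm, c0, c1, ..., ck ]
//
// each term being multiplied by 'sampleweight' when weightsize_ > 0, and the
// penalty sum degenerating to a plain copy of c0 when there are no penalties.
// Only the first element of training_cost is actually optimized.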
00904 
00906 // buildFuncs //
00908 void DistRepNNet::buildFuncs(VarArray& invars) {
00909     invars.resize(0);
00910     VarArray outvars;
00911     VarArray testinvars;
00912     if (input)
00913     {
00914         invars.push_back(input);
00915         testinvars.push_back(input);
00916     }
00917     if (output)
00918         outvars.push_back(output);
00919     if(target)
00920     {
00921         invars.push_back(target);
00922         testinvars.push_back(target);
00923         outvars.push_back(target);
00924     }
00925     if(sampleweight)
00926     {
00927         invars.push_back(sampleweight);
00928     }
00929     f = Func(input, argmax(output));
00930     //f = Func(input, output);
00931     test_costf = Func(testinvars, argmax(output)&test_costs);
00932     //test_costf = Func(testinvars,output&test_costs);
00933     test_costf->recomputeParents();
00934     if(dist_rep)
00935         token_to_dist_rep = Func(token_features,dist_rep);
00936     paramf = Func(invars, training_cost); 
00937     //displayFunction(paramf, true, false, 250);
00938 }
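// Sketch of how these Funcs are typically used by a learner of this kind
// (mirroring NNet-style computeOutput()/computeOutputAndCosts(); 'input_vec',
// 'target_vec', 'out_vec' and 'out_and_costs_vec' are assumed to be Vecs of the
// appropriate sizes):
//
//     f->fprop(input_vec, out_vec);            // out_vec[0] = index of the argmax class
//     test_costf->fprop(input_vec & target_vec, out_and_costs_vec);  // [argmax, test costs...]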
00939 
00941 // buildOutputFromInput //
00943 void DistRepNNet::buildOutputFromInput(int task_index) {
00944     
00945     /*
00946     if(nnet_architecture == "csMTL")
00947     {
00948         // The idea is to output a "potential" for each
00949         // target possibility...
00950         // Hence, we need to make a propagation path from
00951         // the computations using only the input part
00952         // (and hence commun to all targets) and the
00953         // target disptributed representation, to the potential output.
00954         // In order to know what are the possible targets,
00955         // the train_set vmat, which contains the target
00956         // Dictionary, will be used.
00957 
00958         // Computations common to all targets
00959         if(nhidden>0)
00960         {
00961             w1 = Var(1 + dp_input->size(), nhidden, "w1");
00962             params.append(w1);
00963             output = affine_transform(dp_input, w1); 
00964         }
00965         else
00966         {
00967             wout = Var(1 + dp_input->size(), outputsize(), "wout");
00968             output = affine_transform(dp_input, wout);
00969             if(!fixed_output_weights)
00970             {
00971                 params.append(wout);
00972             }
00973             else
00974             {
00975                 outbias = Var(output->size(),"outbias");
00976                 output = output + outbias;
00977                 params.append(outbias);
00978             }
00979         }
00980 
00981         Var comp_input = output;
00982         Var dp_target = Var(1,dist_rep_dim[target_dict_index]);
00983 
00984         VarArray proppath_params;
00985         if(nhidden>0)
00986         {
00987             w1target = Var( dp_target->size(),nhidden, "w1target");      
00988             params.append(w1target);
00989             proppath_params.append(w1target);
00990             output = output + product(dp_target, w1target);
00991             output = add_transfer_func(output);
00992         }
00993         else
00994         {
00995             wouttarget = Var(dp_target->size(),outputsize(), "wouttarget");
00996             if (!fixed_output_weights)        
00997             {
00998                 params.append(wouttarget);        
00999                 proppath_params.append(wouttarget);        
01000             }
01001             output = output + product(dp_target,wouttarget);
01002             //output = add_transfer_func(output);
01003         }
01004     
01005         // second hidden layer
01006         if(nhidden2>0)
01007         {
01008             w2 = Var(1 + output.length(), nhidden2, "w2");
01009             params.append(w2);
01010             proppath_params.append(w2);
01011             output = affine_transform(output,w2);
01012             output = add_transfer_func(output);
01013         }
01014 
01015         if (nhidden2>0 && nhidden==0)
01016             PLERROR("DistRepNNet:: can't have nhidden2 (=%d) > 0 while nhidden=0",nhidden2);
01017 
01018         // output layer before transfer function when there is at least one hidden layer
01019         if(nhidden > 0)
01020         {
01021             wout = Var(1 + output->size(), outputsize(), "wout");
01022             output = affine_transform(output, wout);
01023 
01024             if (!fixed_output_weights)
01025             {
01026                 params.append(wout);
01027                 proppath_params.append(wout);
01028             }
01029             else
01030             {
01031                 outbias = Var(output->size(),"outbias");
01032                 output = output + outbias;
01033                 params.append(outbias);
01034                 proppath_params.append(outbias);
01035             }
01036         }
01037 
01038         output = potentials(input,comp_input,dp_target,dist_reps[target_dict_index], output, proppath_params, train_set);
01039         partial_update_vars.push_back(dist_reps[target_dict_index]);
01040     }
01041     else 
01042     */
01043 
01044     int this_nhidden;
01045     int this_nhidden2;
01046     if(task_index < 0)
01047     {
01048         this_nhidden = nhidden;
01049         this_nhidden2 = nhidden2;
01050     }
01051     else
01052     {
01053         this_nhidden = nhidden_extra_tasks[task_index];
01054         this_nhidden2 = nhidden2_extra_tasks[task_index];
01055     }
01056 
01057     if(!use_dist_reps)
01058     {
01059         if(!use_output_weights_bases)
01060         {
01061             // These weights will be used as the input weights of the neural
01062             // network, instead of w1.
01063             int dim;       
01064             if(this_nhidden > 0) dim = this_nhidden;
01065             else dim = dictionaries[target_dict_index]->size(); //+ (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1);
01066             winputsparse.resize(input->length()+1);
01067             winputsparse[input->length()] = Var(1,dim);
01068             for(int j=0; j<winputsparse.length()-1; j++)
01069             {
01070                 if(input_to_dict_index[j] < 0)
01071                     winputsparse[j] = Var(1,dim);
01072                 else
01073                     winputsparse[j] = Var(dictionaries[input_to_dict_index[j]]->size()+1,dim);
01074             }
01075             params.append(winputsparse);
01076             partial_update_vars.append(winputsparse);            
01077         }
01078 
01079         if(this_nhidden>0)
01080         {
01081             output = buildSparseAffineTransform(winputsparse,input,input_to_dict_index,0); 
01082             output = add_transfer_func(output);
01083 
01084             winputsparse_weight_decay = weight_decay + layer1_weight_decay;
01085             winputsparse_bias_decay = bias_decay + layer1_bias_decay;
01086 
01087             // here: implement direct in-to-out
01088 
01089             if(direct_in_to_out)
01090             {
01091                 PLERROR("In buildOutputFromInput(): direct_in_to_out option not implemented for sparse input.");
01092                 direct_wout = Var(dictionaries[target_dict_index]->size()
01093                                   //+ (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1)
01094                                   , dp_input->size(), "direct_wout");
01095                 params.append(direct_wout);
01096             }                           
01097         }
01098 
01099         // second hidden layer
01100         if(this_nhidden2>0)
01101         {
01102             w2 = Var(1 + output.length(), this_nhidden2, "w2");
01103             params.append(w2);
01104             output = affine_transform(output,w2);
01105             output = add_transfer_func(output);
01106             wout = Var(1 + this_nhidden2, dictionaries[target_dict_index]->size()
01107                        //+ (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1)
01108                        , "wout");
01109         }
01110         else if(this_nhidden > 0) wout = Var(1 + this_nhidden, dictionaries[target_dict_index]->size()
01111                                              //+ (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1)
01112                                              , "wout");
01113 
01114         if (this_nhidden2>0 && this_nhidden==0)
01115             PLERROR("DistRepNNet:: can't have nhidden2 (=%d) > 0 while nhidden=0",this_nhidden2);
01116 
01117         if(this_nhidden > 0)
01118         {            
01119             // TODO here: add an option without bias for the sparse product...
01120             if(direct_in_to_out)
01121                 output = affine_transform(output, wout) + product(direct_wout,dp_input);
01122             else
01123                 output = affine_transform(output, wout);
01124         }
01125         else
01126         {
01127             output = buildSparseAffineTransform(winputsparse,input,input_to_dict_index,0); 
01128 
01129             winputsparse_weight_decay = weight_decay + output_layer_weight_decay;
01130             winputsparse_bias_decay = bias_decay + output_layer_bias_decay;
01131         }
01132 
01133         if(fixed_output_weights)
01134         {
01135             outbias = Var(output->size(),"outbias");
01136             output = output + outbias;
01137             params.append(outbias);
01138         }
01139         else if(this_nhidden>0) params.append(wout);
01140 
01141         //output = transpose(output);
01142     }
01143     else
01144     {
01145         int before_output_size = dp_input->size();
01146         if(this_nhidden > 0) before_output_size = this_nhidden;
01147         if(this_nhidden2 > 0) before_output_size = this_nhidden2;
01148 
01149         /*if(possible_targets_varies)
01150         {            
01151             PLERROR("In DistRepNNet::buildOutputFromInput(): possible_targets_varies is not implemented yet");
01152             if(use_output_weights_bases)
01153                 ;
01154         }
01155         else*/
01156         {
01157             if(use_output_weights_bases)
01158             {
01159                 PLERROR("In DistRepNNet::buildOutputFromInput(): use_output_weights_bases is not implemented yet");
01160 
01161                 // TODO: add a check so that the distributed
01162                 // representations are shared when the dictionaries are the same...
01163                 // Don't forget to also share the w1theta and wouttheta weights
01164 
01165                 //dp_all_targets = Var(dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1,dist_rep_dim.last(),"dp_all_targets");
01166                 // TODO: think about how the initialization should be done!
01167                 params.append(dp_all_targets);
01168                 
01169                 if(nhidden_theta_predictor>0)
01170                 {
01171                     if(!w1theta) 
01172                     {
01173                         w1theta = Var(dp_all_targets->length()+1,nhidden_theta_predictor,"w1theta");
01174                         params.append(w1theta);
01175                     }
01176                     wout = new MatrixAffineTransformVariable(dp_all_targets,w1theta);
01177                     wout = add_transfer_func(wout);
01178                 }
01179                 else
01180                     wout = dp_all_targets;
01181             
01182             
01183                 if(!wouttheta)
01184                 {
01185                     wouttheta = Var(wout->length()+1,before_output_size+1, "wouttheta");
01186                     params.append(wouttheta);
01187                 }
01188                 wout = new MatrixAffineTransformVariable(wout,wouttheta);
01189             }
01190             else
01191             {
01192                 wout = Var(1 + before_output_size, dictionaries[target_dict_index]->size()
01193                            //+ (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1)
01194                            , "wout");                
01195             }
01196         }
01197 
01198         if(this_nhidden>0)
01199         {
01200             w1 = Var(1 + dp_input->size(), this_nhidden, "w1");
01201             params.append(w1);
01202             
01203             output = affine_transform(dp_input, w1); 
01204             output = add_transfer_func(output);
01205             
01206             if(direct_in_to_out)
01207             {
01208                 direct_wout = Var(dictionaries[target_dict_index]->size()
01209                                   //+ (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1)
01210                                   , dp_input->size(), "direct_wout");
01211                 params.append(direct_wout);
01212             }                           
01213         }
01214 
01215         // second hidden layer
01216         if(this_nhidden2>0)
01217         {
01218             w2 = Var(1 + output.length(), this_nhidden2, "w2");
01219             params.append(w2);
01220             output = affine_transform(output,w2);
01221             output = add_transfer_func(output);
01222         }
01223 
01224         if (this_nhidden2>0 && this_nhidden==0)
01225             PLERROR("DistRepNNet:: can't have nhidden2 (=%d) > 0 while nhidden=0",this_nhidden2);
01226 
01227         if(this_nhidden > 0)
01228         {            
01229             if(direct_in_to_out)
01230                 output = affine_transform(output, wout) + product(direct_wout,dp_input);
01231             else
01232                 output = affine_transform(output, wout);
01233         }
01234         else
01235         {
01236             if(use_dist_reps)
01237                 output = affine_transform(dp_input, wout);
01238             else
01239                 output = buildSparseAffineTransform(winputsparse,input,input_to_dict_index,0); 
01240         }
01241 
01242         if(fixed_output_weights)
01243         {
01244             outbias = Var(output->size(),"outbias");
01245             output = output + outbias;
01246             params.append(outbias);
01247         }
01248         else if(use_dist_reps)
01249             params.append(wout);
01250 
01251         //output = transpose(output);
01252     }
01253     /*
01254     TVec<bool> class_tag(dictionaries[target_dict_index]->size() + (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1));
01255     Vec row(train_set.width());
01256     int target;
01257     class_tag.fill(0);
01258     for(int i=0; i<train_set.length(); i++)
01259     {
01260         train_set->getRow(i,row);
01261         target = (int) row[train_set->inputsize()];
01262         class_tag[target] = 1;
01263     }
01264 
01265     Vec seen_target_vec(0);
01266     seen_target.resize(0);
01267     unseen_target.resize(0);
01268     for(int i=0; i<class_tag.length(); i++)
01269         if(class_tag[i])
01270         {
01271             seen_target_vec.push_back(i);
01272             seen_target.push_back(i);
01273         }
01274         else unseen_target.push_back(i);
01275 
01276     if(seen_target_vec.length() != class_tag.length())
01277         train_output = new VarRowsVariable(output,new SourceVariable(seen_target_vec));
01278     */
01279 
01280     // output_transfer_func
01281     if(output_transfer_func!="" && output_transfer_func!="none")
01282     {
01283         /*
01284         if(consider_unseen_classes)
01285             output = insert_zeros(add_transfer_func(output, output_transfer_func),seen_target);
01286         else
01287         */
01288             output = add_transfer_func(output, output_transfer_func);
01289     }
01290     /*
01291     else
01292     {
01293         if(consider_unseen_classes)
01294             output = insert_zeros(output,seen_target);
01295     }
01296     */
01297 
01298     /*
01299     if(train_output)
01300         if(output_transfer_func!="" && output_transfer_func!="none")
01301             train_output = insert_zeros(add_transfer_func(train_output, output_transfer_func),unseen_target);
01302         else
01303             train_output = insert_zeros(train_output,unseen_target);
01304     */
01305 }
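// A compact sketch of the graph built above for the use_dist_reps branch,
// ignoring direct_in_to_out, fixed_output_weights and the commented-out
// paths (h is an illustrative name, not a member of the class):
//
//     Var h  = add_transfer_func(affine_transform(dp_input, w1));  // if this_nhidden  > 0
//     h      = add_transfer_func(affine_transform(h, w2));         // if this_nhidden2 > 0
//     output = affine_transform(h, wout);
//     output = add_transfer_func(output, output_transfer_func);    // if one is specified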
01306 
01308 // buildPenalties //
01310 void DistRepNNet::buildPenalties(int this_ntokens) {
01311     penalties.resize(0);  // prevents penalties from being added twice by consecutive builds
01312     if(w1 && ((layer1_weight_decay + weight_decay)!=0 || (layer1_bias_decay + bias_decay)!=0))
01313         penalties.append(affine_transform_weight_penalty(w1, (layer1_weight_decay + weight_decay), (layer1_bias_decay + bias_decay), penalty_type));
01314     if(w1theta && ((layer1_theta_predictor_weight_decay + weight_decay)!=0 || (layer1_theta_predictor_bias_decay + bias_decay)!=0))
01315         penalties.append(affine_transform_weight_penalty(w1theta, (layer1_theta_predictor_weight_decay + weight_decay), (layer1_theta_predictor_bias_decay + bias_decay), penalty_type));
01316     if(w2 && ((layer2_weight_decay + weight_decay)!=0 || (layer2_bias_decay + bias_decay)!=0))
01317         penalties.append(affine_transform_weight_penalty(w2, (layer2_weight_decay + weight_decay), (layer2_bias_decay + bias_decay), penalty_type));
01318     if(!use_output_weights_bases && wout && ((output_layer_weight_decay + weight_decay)!=0 || (output_layer_bias_decay + bias_decay)!=0))
01319         penalties.append(affine_transform_weight_penalty(wout, (output_layer_weight_decay + weight_decay), 
01320                                                          (output_layer_bias_decay + bias_decay), penalty_type));
01321     if(wouttheta && ((output_layer_theta_predictor_weight_decay + weight_decay)!=0 || (output_layer_theta_predictor_bias_decay + bias_decay)!=0))
01322         penalties.append(affine_transform_weight_penalty(wouttheta, (output_layer_theta_predictor_weight_decay + weight_decay), 
01323                                                          (output_layer_theta_predictor_bias_decay + bias_decay), penalty_type));
01324     if(woutdistrep && ((output_dist_rep_predictor_weight_decay + weight_decay)!=0 || (output_dist_rep_predictor_bias_decay + bias_decay)!=0))
01325         penalties.append(affine_transform_weight_penalty(woutdistrep, (output_dist_rep_predictor_weight_decay + weight_decay), 
01326                                                          (output_dist_rep_predictor_bias_decay + bias_decay), penalty_type));
01327     if(direct_wout && ((direct_in_to_out_weight_decay + weight_decay)!=0 || (direct_in_to_out_bias_decay + bias_decay)!=0))
01328         penalties.append(affine_transform_weight_penalty(direct_wout, (direct_in_to_out_weight_decay + weight_decay),
01329                                                          (direct_in_to_out_bias_decay + bias_decay), penalty_type));
01330 
01331     // Here, affine_transform_weight_penalty is not used, since these weight variables don't correspond
01332     // to an affine_transform (i.e. they don't contain both the biases AND the weights)
01333     if(winputdistrep.length() != 0 && (input_dist_rep_predictor_weight_decay + weight_decay + input_dist_rep_predictor_bias_decay + bias_decay))
01334     {
01335         /*
01336         for(int i=0; i<activated_weights.length(); i++)
01337         {
01338             if(input_to_dict_index[i%nfeatures_per_token] < 0)
01339             {
01340                 // Add those weights in the penalties only once
01341                 if(i<nfeatures_per_token)
01342                     penalties.append(affine_transform_weight_penalty(winputdistrep[i], (input_dist_rep_predictor_weight_decay + weight_decay), 
01343                                                                      (input_dist_rep_predictor_weight_decay + weight_decay), penalty_type));
01344             }
01345             else
01346                 // Approximate version of the weight decay on the input weights, which is more computationally efficient
01347                 penalties.append(affine_transform_weight_penalty(activated_weights[i], (input_dist_rep_predictor_weight_decay + weight_decay), 
01348                                                                  (input_dist_rep_predictor_weight_decay + weight_decay), penalty_type));                   
01349         }
01350         */
01351         // Apply only bias decay for first token, since these biases are present in all dist. rep. predictions
01352         if(nfeatures_for_each_token.length() != 0)
01353         {
01354             int sum=0;
01355             int sum_dict = 0;
01356             for(int i=0; i<nfeatures_for_each_token.length(); i++)
01357             {
01358                 penalties.append(buildSparseAffineTransformWeightPenalty(winputdistrep.subVarArray(sum,nfeatures_for_each_token[i]+1), input, input_to_dict_index, sum_dict, input_dist_rep_predictor_weight_decay + weight_decay, input_dist_rep_predictor_bias_decay + bias_decay, penalty_type));
01359                 sum += nfeatures_for_each_token[i]+1;
01360                 sum_dict += nfeatures_for_each_token[i];
01361             }
01362         }
01363         else
01364         {
01365             for(int i=0; i<this_ntokens; i++)
01366                 penalties.append(buildSparseAffineTransformWeightPenalty(winputdistrep, input, input_to_dict_index, i*nfeatures_per_token, input_dist_rep_predictor_weight_decay + weight_decay, (i==0 ? input_dist_rep_predictor_bias_decay + bias_decay : 0), penalty_type));
01367         }
01368     }
01369     //if(winputdistrep.length() != 0 && (input_dist_rep_predictor_bias_decay + bias_decay))
01370     //    penalties.append(affine_transform_weight_penalty(winputdistrep[nfeatures_per_token], (input_dist_rep_predictor_bias_decay + bias_decay), 
01371     //                                                    (input_dist_rep_predictor_bias_decay + bias_decay), penalty_type));
01372 
01373     if(winputsparse.length() != 0 && (winputsparse_weight_decay + winputsparse_bias_decay != 0))
01374     {
01375         penalties.append(buildSparseAffineTransformWeightPenalty(winputsparse, input, input_to_dict_index, 0, winputsparse_weight_decay, winputsparse_bias_decay, penalty_type));
01376     }
01377 
01378 //    if(winputsparse.length() != 0 && winputsparse_weight_decay)
01379 //    {
01380 //        for(int i=0; i<activated_weights.length(); i++)
01381 //        {
01382 //            penalties.append(affine_transform_weight_penalty(activated_weights[i], winputsparse_weight_decay, 
01383 //                                                             winputsparse_weight_decay, penalty_type));
01384 //        }
01385 //    }
01386 //    if(winputsparse.length() != 0 && winputsparse_bias_decay)
01387 //        penalties.append(affine_transform_weight_penalty(winputsparse.last(), winputsparse_bias_decay, 
01388 //                                                         winputsparse_bias_decay, penalty_type));
01389 
01390 }
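// The appended penalties implement standard weight decay on each weight
// matrix: for a matrix whose first row holds the biases, the contribution is
// roughly
//     weight_decay * (sum over non-bias entries of |w|^p)
//   + bias_decay   * (sum over bias entries     of |w|^p)
// where the exponent p depends on penalty_type (squared weights for an
// L2-style penalty, absolute values for an L1-style one). This is only a
// hedged summary; the exact conventions are those of
// AffineTransformWeightPenalty and of the sparse variant returned by
// buildSparseAffineTransformWeightPenalty.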
01391 
01393 // computeCostsFromOutputs //
01395 void DistRepNNet::computeCostsFromOutputs(const Vec& inputv, const Vec& outputv, 
01396                                    const Vec& targetv, Vec& costsv) const
01397 {
01398     PLERROR("In DistRepNNet::computeCostsFromOutputs: output is not enough to compute cost");
01399     //computeOutputAndCosts(inputv,targetv,outputv,costsv);
01400 }
01401 
01403 // computeOutput //
01405 void DistRepNNet::computeOutput(const Vec& inputv, Vec& outputv) const
01406 {
01407     f->sizefprop(inputv,output_comp);
01408     row.resize(inputsize_);
01409     row << inputv;
01410     row.resize(train_set->width());
01411     row.subVec(inputsize_,train_set->width()-inputsize_).fill(MISSING_VALUE);
01412     if(target_dictionary)
01413         target_dictionary->getValues(options,target_values);
01414     else
01415         train_set->getValues(row,inputsize_,target_values);
01416     // TO REMOVE!!!
01417     //for(int i=0; i<outputv.length(); i++)
01418     //{
01419     //    outputv[(int)target_values[i]] = output->valuedata[i];
01420     //}
01421     outputv[0] = target_values[(int)output_comp[0]];
01422     //outputv[0] = (int)output_comp[0];
01423 }
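// computeOutput therefore returns a class value, not a score vector: f
// computes the index of the selected class among the possible target values
// for this input, and that index is mapped back through target_values.
// A small illustrative example (values made up):
//
//     // output_comp[0] == 2 and target_values == [4, 7, 9, 12]
//     // => outputv[0] == 9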
01424 
01426 // computeOutputAndCosts //
01428 void DistRepNNet::computeOutputAndCosts(const Vec& inputv, const Vec& targetv, 
01429                                  Vec& outputv, Vec& costsv) const
01430 {
01431     test_costf->sizefprop(inputv&targetv, output_comp&costsv);
01432     row.resize(inputsize_);
01433     row << inputv;
01434     row.resize(train_set->width());
01435     row.subVec(inputsize_,train_set->width()-inputsize_).fill(MISSING_VALUE);
01436     if(target_dictionary)
01437         target_dictionary->getValues(options,target_values);
01438     else
01439         train_set->getValues(row,inputsize_,target_values);
01440     outputv[0] = target_values[(int)output_comp[0]];
01441     //outputv[0] = (int)output_comp[0];
01442     //for(int i=0; i<costsv.length(); i++)
01443     //    if(is_missing(costsv[i]))
01444     //        cout << "Missing cost value!" << endl;
01445 }
01446 
01448 // fillWeights //
01450 void DistRepNNet::fillWeights(const Var& weights, bool clear_first_row, int use_this_to_scale) {
01451     if (initialization_method == "zero") {
01452         weights->value->clear();
01453         return;
01454     }
01455     real delta;
01456     int is;
01457     if(use_this_to_scale > 0)
01458         is = use_this_to_scale;
01459     else
01460         is = weights.length();
01461     if (clear_first_row)
01462         is--; // -1 to get the same result as before.
01463     if (initialization_method.find("linear") != string::npos)
01464         delta = 1.0 / real(is);
01465     else
01466         delta = 1.0 / sqrt(real(is));
01467     if (initialization_method.find("normal") != string::npos)
01468         fill_random_normal(weights->value, 0, delta);
01469     else
01470         fill_random_uniform(weights->value, -delta, delta);
01471     if (clear_first_row)
01472         weights->matValue(0).clear();
01473 }
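// fillWeights scales the random range by the fan-in "is": delta = 1/is for
// "linear" initialization methods and 1/sqrt(is) otherwise, drawing from
// N(0, delta) for "normal" methods and from U(-delta, delta) otherwise.
// A minimal standalone sketch of the same scaling rule (illustrative helper,
// not part of the class):
//
//     #include <cmath>
//     double init_scale(int fan_in, bool linear)
//     {
//         return linear ? 1.0 / fan_in : 1.0 / std::sqrt((double) fan_in);
//     }
//     // e.g. init_scale(100, false) == 0.1 : weights drawn in [-0.1, 0.1]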
01474 
01476 // forget //
01478 void DistRepNNet::forget()
01479 {
01480     if (train_set) 
01481     {
01482         paramsvalues.resize(0);
01483         build();
01484     }
01485     if(optimizer)
01486         optimizer->reset();
01487     for(int t=0; t<optimizer_extra_tasks.length(); t++)
01488         optimizer_extra_tasks[t]->reset();
01489     stage = 0;
01490 }
01491 
01493 // getTrainCostNames //
01495 TVec<string> DistRepNNet::getTrainCostNames() const
01496 {
01497     PLASSERT( !cost_funcs.isEmpty() );
01498     int n_costs = cost_funcs.length();
01499     TVec<string> train_costs(n_costs + 1);
01500     train_costs[0] = cost_funcs[0] + "+penalty";
01501     train_costs.subVec(1, n_costs) << cost_funcs;
01502     return train_costs;
01503 }
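// For example, if cost_funcs were [ "NLL", "class_error" ] (illustrative
// names), the train cost names would be [ "NLL+penalty", "NLL",
// "class_error" ], while getTestCostNames() below simply returns cost_funcs.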
01504 
01506 // getTestCostNames //
01508 TVec<string> DistRepNNet::getTestCostNames() const
01509 { 
01510     return cost_funcs;
01511 }
01512 
01514 // add_transfer_func //
01516 Var DistRepNNet::add_transfer_func(const Var& input, string transfer_func, VarArray mus, Var sigma) {
01517     Var result;
01518     if (transfer_func == "default")
01519         transfer_func = hidden_transfer_func;
01520     if(transfer_func=="linear")
01521         result = input;
01522     else if(transfer_func=="tanh")
01523         result = tanh(input);
01524     else if(transfer_func=="sigmoid")
01525         result = sigmoid(input);
01526     else if(transfer_func=="softplus")
01527         result = softplus(input);
01528     else if(transfer_func=="exp")
01529         result = exp(input);
01530     else if(transfer_func=="softmax")
01531         result = softmax(input);
01532     else if (transfer_func == "log_softmax")
01533         result = log_softmax(input);
01534     else if(transfer_func=="hard_slope")
01535         result = unary_hard_slope(input,0,1);
01536     else if(transfer_func=="symm_hard_slope")
01537         result = unary_hard_slope(input,-1,1);
01538     else PLERROR("In DistRepNNet::add_transfer_func(): Unknown value for transfer_func: %s",transfer_func.c_str());
01539     return result;
01540 }
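// Typical usage inside the build methods (a sketch; "default" falls back to
// hidden_transfer_func, and an unknown name raises a PLERROR):
//
//     Var h = affine_transform(dp_input, w1);
//     h = add_transfer_func(h);                     // hidden_transfer_func, e.g. "tanh"
//     Var out = add_transfer_func(h, "softmax");    // explicit transfer function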
01541 
01543 // initializeParams //
01545 void DistRepNNet::initializeParams(bool set_seed, int task_index)
01546 {
01547     if (set_seed) {
01548         if (seed_>=0)
01549             manual_seed(seed_);
01550         else
01551             PLearn::seed();
01552     }
01553 
01554     if(w1) fillWeights(w1, true);
01555     /*
01556       if(w1target)
01557       fillWeights(w1target, false);
01558     */
01559     if(w2) fillWeights(w2, true);
01560     if(w1theta) fillWeights(w1theta,true);
01561     if(direct_wout) fillWeights(direct_wout,false);
01562 
01563     if(wout)
01564         if (fixed_output_weights) {
01565             static Vec values;
01566             if (values.size()==0)
01567             {
01568                 values.resize(2);
01569                 values[0]=-1;
01570                 values[1]=1;
01571             }
01572             fill_random_discrete(wout->value, values);
01573             wout->matValue(0).clear();                        
01574         }
01575         else fillWeights(wout, true);
01576     if(outbias) outbias->matValue.clear();
01577 
01578     if(wouttheta) fillWeights(wouttheta,true,wouttheta->width());
01579 
01580     if(woutdistrep) fillWeights(woutdistrep,true);
01581     if(winputdistrep.size() != 0)
01582     {
01583         if(initialize_sparse_params_to_zero)
01584         {
01585             for(int i=0; i<winputdistrep.length(); i++)
01586                 winputdistrep[i]->value.fill(0.0);
01587         }
01588         else
01589         {
01590             if(nfeatures_for_each_token.length() != 0)
01591             {
01592                 int sum = 0;
01593                 for(int t=0; t<nfeatures_for_each_token.length(); t++)
01594                 {
01595                     for(int i=0; i<nfeatures_for_each_token[t]; i++)
01596                     {
01597                         if(task_index < 0)
01598                             fillWeights(winputdistrep[sum+i],false,train_set->inputsize());
01599                         else
01600                             fillWeights(winputdistrep[sum+i],false,extra_tasks[task_index]->inputsize());
01601                     }
01602                     winputdistrep[sum+nfeatures_for_each_token[t]]->value.fill(0.0);
01603                     sum += nfeatures_for_each_token[t]+1;
01604                 }
01605             }
01606             else
01607             {
01608                 for(int i=0; i<nfeatures_per_token; i++)
01609                 {
01610                     if(task_index < 0)
01611                         fillWeights(winputdistrep[i],false,train_set->inputsize());
01612                     else
01613                         fillWeights(winputdistrep[i],false,extra_tasks[task_index]->inputsize());
01614                 }
01615                 winputdistrep[nfeatures_per_token]->value.fill(0.0);
01616             }
01617         }
01618     }
01619     if(winputsparse.size() != 0)
01620     {
01621         if(initialize_sparse_params_to_zero)
01622         {
01623             for(int i=0; i<winputsparse.length(); i++)
01624                 winputsparse[i]->value.fill(0.0);
01625         }
01626         else
01627         {
01628             for(int i=0; i<winputsparse.size(); i++)
01629             {
01630                 if(task_index < 0)
01631                     fillWeights(winputsparse[i],false,train_set->inputsize());
01632                 else
01633                     fillWeights(winputsparse[i],false,extra_tasks[task_index]->inputsize());
01634             }
01635             winputsparse.last()->value.fill(0.0);
01636         }
01637     }
01638 }
01639 
01641 #ifdef __INTEL_COMPILER
01642 #pragma warning(disable:1419)  // Get rid of compiler warning.
01643 #endif
01644 extern void varDeepCopyField(Var& field, CopiesMap& copies);
01645 #ifdef __INTEL_COMPILER
01646 #pragma warning(default:1419)
01647 #endif
01648 
01649 
01651 // makeDeepCopyFromShallowCopy //
01653 void DistRepNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
01654 {
01655     inherited::makeDeepCopyFromShallowCopy(copies);
01656     deepCopyField(target_values,copies);
01657     deepCopyField(output_comp,copies);
01658     deepCopyField(row,copies);
01659     deepCopyField(tf,copies);
01660     //deepCopyField(cost_paramf,copies);
01661 
01662     varDeepCopyField(output, copies);
01663     deepCopyField(costs, copies);
01664     deepCopyField(partial_update_vars, copies);
01665     deepCopyField(penalties, copies);
01666     varDeepCopyField(training_cost, copies);
01667     deepCopyField(training_cost_extra_tasks, copies);
01668     varDeepCopyField(test_costs, copies);
01669     deepCopyField(invars, copies);
01670     deepCopyField(invars_extra_tasks, copies);
01671     deepCopyField(params, copies);
01672     //deepCopyField(activated_weights, copies);
01673     deepCopyField(input_to_dict_index,copies);
01674 
01675     deepCopyField(paramsvalues, copies);
01676     varDeepCopyField(input, copies);
01677     varDeepCopyField(dp_input, copies);
01678     varDeepCopyField(target, copies);
01679     varDeepCopyField(sampleweight, copies);
01680     varDeepCopyField(w1, copies);
01681     deepCopyField(winputsparse, copies);
01682     //varDeepCopyField(w1target, copies);
01683     varDeepCopyField(w1theta, copies);
01684     deepCopyField(winputdistrep, copies);
01685     varDeepCopyField(woutdistrep, copies);
01686     varDeepCopyField(w2, copies);
01687     varDeepCopyField(wout, copies);
01688     varDeepCopyField(direct_wout, copies);
01689     //varDeepCopyField(wouttarget, copies);
01690     varDeepCopyField(wouttheta, copies);
01691     varDeepCopyField(outbias, copies);
01692     varDeepCopyField(dp_all_targets, copies);
01693     varDeepCopyField(token_features, copies);
01694     varDeepCopyField(dist_rep, copies);
01695     //deepCopyField(dist_reps, copies);
01696     deepCopyField(dictionaries,copies);
01697     //deepCopyField(seen_target, copies);
01698     //deepCopyField(unseen_target, copies);
01699     deepCopyField(f, copies);
01700     deepCopyField(test_costf, copies);
01701     deepCopyField(token_to_dist_rep, copies);
01702     deepCopyField(paramf, copies);
01703 
01704     deepCopyField(extra_tasks, copies);
01705     deepCopyField(nhidden_extra_tasks, copies);
01706     deepCopyField(nhidden2_extra_tasks, copies);
01707     deepCopyField(cost_funcs, copies);
01708     deepCopyField(optimizer, copies);
01709     deepCopyField(optimizer_extra_tasks, copies);
01710     deepCopyField(dist_rep_dim, copies);
01711     deepCopyField(ntokens_extra_tasks,copies);
01712     deepCopyField(target_dictionary,copies);
01713     deepCopyField(target_dist_rep,copies);
01714 }
01715 
01717 // outputsize //
01719 int DistRepNNet::outputsize() const {
01720     return targetsize_;
01721     //return dictionaries[target_dict_index]->size() + (dictionaries[target_dict_index]->oov_not_in_possible_values ? 0 : 1); 
01722 }
01723 
01724 void DistRepNNet::getTokenDistRep(TVec<string>& token_features, Vec& dist_rep)
01725 {
01726     tf.resize(token_features.length());
01727     for(int i=0; i<tf.length(); i++)
01728     {
01729         if(input_to_dict_index[i] < 0)
01730             tf[i] = toreal(token_features[i]);
01731         else
01732             tf[i] = dictionaries[input_to_dict_index[i]]->getId(token_features[i]);
01733     }
01734     token_to_dist_rep->fprop(tf,dist_rep);
01735 }
01736 
01738 // train //
01740 void DistRepNNet::train()
01741 {
01742     // DistRepNNet's nstages is the number of epochs (whole passes through the training set),
01743     // while the optimizer's nstages is the number of weight updates.
01744     // So the relationship between the two depends on whether we are in stochastic, batch or minibatch mode.
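    // For example, with l = 10000 training samples and batch_size = 32,
    // nsamples = 32 and optstage_per_lstage = 10000/32 = 312 (integer
    // division), so one learner stage (epoch) corresponds to 312 optimizer
    // stages; with batch_size <= 0, nsamples = l and one epoch is a single
    // optimizer stage.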
01745 
01746     if(!train_set)
01747         PLERROR("In DistRepNNet::train, you did not setTrainingSet");
01748     
01749     if(!train_stats)
01750         PLERROR("In DistRepNNet::train, you did not setTrainStatsCollector");
01751     
01752     if(!use_extra_tasks_only_on_first_epoch || stage == 0)
01753     {
01754         int current_stage = stage;
01755         for(int t=0; t<extra_tasks.length(); t++)
01756         {
01757             int l = extra_tasks[t]->length();  
01758             
01759             // number of samples seen by optimizer before each optimizer update
01760             int nsamples = batch_size>0 ? batch_size : l;
01761             paramf = Func(invars_extra_tasks[t], training_cost_extra_tasks[t]); // parameterized function to optimize
01762             Var totalcost = meanOf(extra_tasks[t], paramf, nsamples, true);
01763             if(optimizer_extra_tasks[t])
01764             {
01765                 optimizer_extra_tasks[t]->setToOptimize(params, totalcost);
01766                 if(partial_update_vars.length() != 0) optimizer_extra_tasks[t]->setPartialUpdateVars(partial_update_vars);
01767                 optimizer_extra_tasks[t]->build();
01768             }
01769             else PLERROR("DistRepNNet::train can't train without setting an optimizer first!");
01770             
01771             // number of optimizer stages corresponding to one learner stage (one epoch)
01772             int optstage_per_lstage = l/nsamples;
01773             
01774             PP<ProgressBar> pb;
01775             if(report_progress)
01776                 pb = new ProgressBar("Extra task " + tostring(t) + ", Training " + classname() + " from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
01777             
01778             int initial_stage = stage;
01779             bool early_stop=false;
01780             //displayFunction(paramf, true, false, 250);
01781             //cout << params.size() << " params to train" << endl;
01782             while(stage<nstages && !early_stop)
01783             {
01784                 optimizer_extra_tasks[t]->nstages = optstage_per_lstage;
01785                 train_stats->forget();
01786                 optimizer_extra_tasks[t]->early_stop = false;
01787                 optimizer_extra_tasks[t]->optimizeN(*train_stats);
01788                 // optimizer->verifyGradient(1e-4); // Uncomment if you want to check your new Var.
01789                 train_stats->finalize();
01790                 if(verbosity>2)
01791                     cout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
01792                 ++stage;
01793                 if(pb)
01794                     pb->update(stage-initial_stage);
01795             }
01796             if(verbosity>1)
01797                 cout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;
01798             
01799             stage = current_stage;
01800         }
01801     }    
01802     
01803     int l = train_set->length();  
01804 
01805     if(f.isNull()) // Net has not been properly built yet (because build was called before the learner had a proper training set)
01806         build();
01807 
01808     // number of samples seen by optimizer before each optimizer update
01809     int nsamples = batch_size>0 ? batch_size : l;
01810     paramf = Func(invars, training_cost); // parameterized function to optimize
01811     Var totalcost = meanOf(train_set, paramf, nsamples, true);
01812     if(optimizer)
01813     {
01814         optimizer->setToOptimize(params, totalcost);
01815         if(partial_update_vars.length() != 0) optimizer->setPartialUpdateVars(partial_update_vars);
01816         optimizer->build();
01817     }
01818     else PLERROR("DistRepNNet::train can't train without setting an optimizer first!");
01819 
01820     // number of optimizer stages corresponding to one learner stage (one epoch)
01821     int optstage_per_lstage = l/nsamples;
01822 
01823     PP<ProgressBar> pb;
01824     if(report_progress)
01825         pb = new ProgressBar("Training " + classname() + " from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
01826 
01827     int initial_stage = stage;
01828     bool early_stop=false;
01829     //displayFunction(paramf, true, false, 250);
01830     //cout << params.size() << " params to train" << endl;
01831     while(stage<nstages && !early_stop)
01832     {
01833         optimizer->nstages = optstage_per_lstage;
01834         train_stats->forget();
01835         optimizer->early_stop = false;
01836         optimizer->optimizeN(*train_stats);
01837         // optimizer->verifyGradient(1e-4); // Uncomment if you want to check your new Var.
01838         train_stats->finalize();
01839         if(verbosity>2)
01840             cout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
01841         ++stage;
01842         if(pb)
01843             pb->update(stage-initial_stage);
01844     }
01845     if(verbosity>1)
01846         cout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;
01847 
01848     // HUGO: Why?
01849     test_costf->recomputeParents();
01850 }
01851 
01852 } // end of namespace PLearn
01853 
01854 
01855 /*
01856   Local Variables:
01857   mode:c++
01858   c-basic-offset:4
01859   c-file-style:"stroustrup"
01860   c-file-offsets:((innamespace . 0)(inline-open . 0))
01861   indent-tabs-mode:nil
01862   fill-column:79
01863   End:
01864 */
01865 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :