// -*- C++ -*-

// MultiInstanceNNet.cc
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (c) 1999-2005 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************      
 * $Id: MultiInstanceNNet.cc 8321 2007-11-28 21:37:09Z nouiz $
 ******************************************************* */


#include <plearn/var/AffineTransformVariable.h>
#include <plearn/var/AffineTransformWeightPenalty.h>
#include <plearn/var/BinaryClassificationLossVariable.h>
#include <plearn/var/ClassificationLossVariable.h>
#include <plearn/var/ConcatColumnsVariable.h>
#include <plearn/vmat/ConcatColumnsVMatrix.h>
#include <plearn/var/CrossEntropyVariable.h>
#include <plearn/var/ExpVariable.h>
#include <plearn/var/LogVariable.h>
#include <plearn/var/LiftOutputVariable.h>
#include <plearn/var/LogSoftmaxVariable.h>
#include <plearn/var/MulticlassLossVariable.h>
#include "MultiInstanceNNet.h"
#include <plearn/var/UnfoldedSumOfVariable.h>
#include <plearn/var/SumOverBagsVariable.h>
#include <plearn/var/SumSquareVariable.h>
#include <plearn/math/random.h>
#include <plearn/var/SigmoidVariable.h>
#include <plearn/var/SumVariable.h>
#include <plearn/var/SumAbsVariable.h>
#include <plearn/var/SumOfVariable.h>
#include <plearn/vmat/SubVMatrix.h>
#include <plearn/var/TanhVariable.h>
#include <plearn/var/TransposeProductVariable.h>
#include <plearn/var/Var_operators.h>
#include <plearn/var/Var_utils.h>

//#include "DisplayUtils.h"
//#include "GradientOptimizer.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(MultiInstanceNNet, 
                        "Multi-instance feedforward neural network for probabilistic classification", 
                        "The data has the form of a set of input vectors x_i associated with a single\n"
                        "label y. Each x_i is an instance, and the overall set of instances is called a bag.\n"
                        "We don't know which of the inputs is responsible for the label, i.e.\n"
                        "there are hidden (unobserved) labels y_i associated with each of the inputs x_i.\n"
                        "We do know that y=1 if at least one of the y_i is 1, and otherwise y=0, i.e.\n"
                        "   y = y_1 or y_2 or ... y_m\n"
                        "In terms of probabilities, this means that\n"
                        "   P(Y=0|x_1..x_m) = \\prod_{i=1}^m P(y_i=0|x_i)\n"
                        "which determines the likelihood of the observation (x_1...x_m,y).\n"
                        "The neural network implements the computation of P(y_i=1|x_i). The same\n"
                        "model is assumed for all instances in the bag. The number of instances is variable but\n"
                        "bounded a priori (max_n_instances). The gradient is computed for a whole bag\n"
                        "at a time. The architectural parameters and hyper-parameters of the model\n"
                        "are otherwise the same as for the generic NNet class.\n"
                        "The bags within each data set are specified with a second target column\n"
                        "(the first target column holds y: 0, 1 or missing; it should not be missing for the\n"
                        "last row of the bag). The second target column must be 0, 1, 2 or 3:\n"
                        "  1: first row of a bag\n"
                        "  2: last row of a bag\n"
                        "  3: simultaneously first and last, i.e. the bag contains a single row\n"
                        "  0: intermediate row of a bag\n"
                        "following the protocol expected by the SumOverBagsVariable.\n"
    );
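
// Illustration only (not used by the class): a minimal sketch of the
// noisy-OR bag probability described in the help text above. Given the
// per-instance probabilities p_i = P(y_i=1|x_i) computed by the network,
// the bag-level probability is P(Y=1|x_1..x_m) = 1 - prod_{i=1}^m (1 - p_i).
// The function name and signature are hypothetical, purely for exposition.
static inline double exampleBagProbability(const double* p, int m)
{
    double p_bag_zero = 1.0;      // P(Y=0|bag) = prod_i P(y_i=0|x_i)
    for (int i = 0; i < m; i++)
        p_bag_zero *= 1.0 - p[i]; // multiply in P(y_i=0|x_i) = 1 - p_i
    return 1.0 - p_bag_zero;      // P(Y=1|bag)
}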

MultiInstanceNNet::MultiInstanceNNet() // DEFAULT VALUES FOR ALL OPTIONS
    : training_set_has_changed(false),
      max_n_instances(1),
      nhidden(0),
      nhidden2(0),
      weight_decay(0),
      bias_decay(0),
      layer1_weight_decay(0),
      layer1_bias_decay(0),
      layer2_weight_decay(0),
      layer2_bias_decay(0),
      output_layer_weight_decay(0),
      output_layer_bias_decay(0),
      direct_in_to_out_weight_decay(0),
      penalty_type("L2_square"),
      L1_penalty(false),
      direct_in_to_out(false),
      interval_minval(0), interval_maxval(1),
      test_bag_size(0),
      batch_size(1)
{}

MultiInstanceNNet::~MultiInstanceNNet()
{
}

void MultiInstanceNNet::declareOptions(OptionList& ol)
{
    declareOption(ol, "max_n_instances", &MultiInstanceNNet::max_n_instances, OptionBase::buildoption, 
                  "    maximum number of instances (input vectors x_i) allowed\n");

    declareOption(ol, "nhidden", &MultiInstanceNNet::nhidden, OptionBase::buildoption, 
                  "    number of hidden units in first hidden layer (0 means no hidden layer)\n");

    declareOption(ol, "nhidden2", &MultiInstanceNNet::nhidden2, OptionBase::buildoption, 
                  "    number of hidden units in second hidden layer (0 means no hidden layer)\n");

    declareOption(ol, "weight_decay", &MultiInstanceNNet::weight_decay, OptionBase::buildoption, 
                  "    global weight decay for all layers\n");

    declareOption(ol, "bias_decay", &MultiInstanceNNet::bias_decay, OptionBase::buildoption, 
                  "    global bias decay for all layers\n");

    declareOption(ol, "layer1_weight_decay", &MultiInstanceNNet::layer1_weight_decay, OptionBase::buildoption, 
                  "    Additional weight decay for the first hidden layer.  Is added to weight_decay.\n");
    declareOption(ol, "layer1_bias_decay", &MultiInstanceNNet::layer1_bias_decay, OptionBase::buildoption, 
                  "    Additional bias decay for the first hidden layer.  Is added to bias_decay.\n");

    declareOption(ol, "layer2_weight_decay", &MultiInstanceNNet::layer2_weight_decay, OptionBase::buildoption, 
                  "    Additional weight decay for the second hidden layer.  Is added to weight_decay.\n");

    declareOption(ol, "layer2_bias_decay", &MultiInstanceNNet::layer2_bias_decay, OptionBase::buildoption, 
                  "    Additional bias decay for the second hidden layer.  Is added to bias_decay.\n");

    declareOption(ol, "output_layer_weight_decay", &MultiInstanceNNet::output_layer_weight_decay, OptionBase::buildoption, 
                  "    Additional weight decay for the output layer.  Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_bias_decay", &MultiInstanceNNet::output_layer_bias_decay, OptionBase::buildoption, 
                  "    Additional bias decay for the output layer.  Is added to 'bias_decay'.\n");

    declareOption(ol, "direct_in_to_out_weight_decay", &MultiInstanceNNet::direct_in_to_out_weight_decay, OptionBase::buildoption, 
                  "    Additional weight decay for the direct in-to-out layer.  Is added to 'weight_decay'.\n");

    declareOption(ol, "penalty_type", &MultiInstanceNNet::penalty_type, OptionBase::buildoption,
                  "    Penalty to use on the weights (for weight and bias decay).\n"
                  "    Can be any of:\n"
                  "      - \"L1\": L1 norm,\n"
                  "      - \"L1_square\": square of the L1 norm,\n"
                  "      - \"L2_square\" (default): square of the L2 norm.\n");

    declareOption(ol, "L1_penalty", &MultiInstanceNNet::L1_penalty, OptionBase::buildoption, 
                  "    Deprecated - You should use \"penalty_type\" instead\n"
                  "    should we use L1 penalty instead of the default L2 penalty on the weights?\n");

    declareOption(ol, "direct_in_to_out", &MultiInstanceNNet::direct_in_to_out, OptionBase::buildoption, 
                  "    should we include direct input to output connections?\n");

    declareOption(ol, "optimizer", &MultiInstanceNNet::optimizer, OptionBase::buildoption, 
                  "    specify the optimizer to use\n");

    declareOption(ol, "batch_size", &MultiInstanceNNet::batch_size, OptionBase::buildoption, 
                  "    how many samples to use to estimate the average gradient before updating the weights\n"
                  "    0 is equivalent to specifying training_set->n_non_missing_rows() \n");

    declareOption(ol, "paramsvalues", &MultiInstanceNNet::paramsvalues, OptionBase::learntoption, 
                  "    The learned parameter vector\n");

    inherited::declareOptions(ol);

}
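
// A hedged usage sketch (not part of the class): with PLearn's usual
// "ClassName( option = value ; ... )" script syntax, a learner built from
// the options declared above might be specified as follows. All values
// here are illustrative assumptions, not recommendations:
//
//   MultiInstanceNNet(
//       max_n_instances = 10 ;
//       nhidden = 20 ;
//       weight_decay = 1e-5 ;
//       penalty_type = "L2_square" ;
//       batch_size = 1 ;
//       optimizer = GradientOptimizer( start_learning_rate = 0.01 )
//   )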

void MultiInstanceNNet::build()
{
    inherited::build();
    build_();
}

void MultiInstanceNNet::setTrainingSet(VMat training_set, bool call_forget)
{ 
    training_set_has_changed =
        !train_set || train_set->width()!=training_set->width() ||
        train_set->length()!=training_set->length() || train_set->inputsize()!=training_set->inputsize()
        || train_set->weightsize()!= training_set->weightsize();

    train_set = training_set;
    if (training_set_has_changed)
    {
        inputsize_ = train_set->inputsize();
        targetsize_ = train_set->targetsize();
        weightsize_ = train_set->weightsize();
    }

    if (training_set_has_changed || call_forget)
    {
        build(); // Fix by Yoshua: otherwise, after a setTrainingSet, the build of a MultiInstanceNNet is not completed
        if (call_forget) forget();
    }

}

void MultiInstanceNNet::build_()
{
    /*
     * Create Topology Var Graph
     */

    // Don't do anything if we don't have a train_set yet:
    // it is the only thing that knows the inputsize and targetsize anyway...

    if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
    {

        // init. basic vars
        input = Var(inputsize(), "input");
        output = input;
        params.resize(0);

        if (targetsize()!=2)
            PLERROR("MultiInstanceNNet:: expected the data to have 2 target columns, got %d",
                    targetsize());

        // first hidden layer
        if(nhidden>0)
        {
            w1 = Var(1+inputsize(), nhidden, "w1");      
            output = tanh(affine_transform(output,w1));
            params.append(w1);
        }

        // second hidden layer
        if(nhidden2>0)
        {
            w2 = Var(1+nhidden, nhidden2, "w2");
            output = tanh(affine_transform(output,w2));
            params.append(w2);
        }

        if (nhidden2>0 && nhidden==0)
            PLERROR("MultiInstanceNNet:: can't have nhidden2 (=%d) > 0 while nhidden=0",nhidden2);
      
        // output layer before transfer function
        wout = Var(1+output->size(), outputsize(), "wout");
        output = affine_transform(output,wout);
        params.append(wout);

        // direct in-to-out layer
        if(direct_in_to_out)
        {
            wdirect = Var(inputsize(), outputsize(), "wdirect"); // Var(1+inputsize(), outputsize(), "wdirect");
            output += transposeProduct(wdirect, input); // affine_transform(input,wdirect);
            params.append(wdirect);
        }

        // the output transfer function is FIXED: it must be a sigmoid (0/1 probabilistic classification)

        output = sigmoid(output);

        /*
         * target and weights
         */

        target = Var(1, "target");

        if(weightsize_>0)
        {
            if (weightsize_!=1)
                PLERROR("MultiInstanceNNet: expected weightsize to be 1 or 0 (or unspecified = -1, meaning 0), got %d",weightsize_);
            sampleweight = Var(1, "weight");
        }

        // build costs
        if( L1_penalty )
        {
            PLDEPRECATED("Option \"L1_penalty\" deprecated. Please use \"penalty_type = L1\" instead.");
            L1_penalty = 0;
            penalty_type = "L1";
        }

        string pt = lowerstring( penalty_type );
        if( pt == "l1" )
            penalty_type = "L1";
        else if( pt == "l1_square" || pt == "l1 square" || pt == "l1square" )
            penalty_type = "L1_square";
        else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
            penalty_type = "L2_square";
        else if( pt == "l2" )
        {
            PLWARNING("L2 penalty not supported, assuming you want L2 square");
            penalty_type = "L2_square";
        }
        else
            PLERROR("penalty_type \"%s\" not supported", penalty_type.c_str());

        // create penalties
        penalties.resize(0);  // prevents penalties from being added twice by consecutive builds
        if(w1 && (!fast_exact_is_equal(layer1_weight_decay + weight_decay,0) ||
                  !fast_exact_is_equal(layer1_bias_decay + bias_decay,    0)))
            penalties.append(affine_transform_weight_penalty(w1, (layer1_weight_decay + weight_decay), (layer1_bias_decay + bias_decay), penalty_type));
        if(w2 && (!fast_exact_is_equal(layer2_weight_decay + weight_decay,0) ||
                  !fast_exact_is_equal(layer2_bias_decay + bias_decay,    0)))
            penalties.append(affine_transform_weight_penalty(w2, (layer2_weight_decay + weight_decay), (layer2_bias_decay + bias_decay), penalty_type));
        if(wout && (!fast_exact_is_equal(output_layer_weight_decay + weight_decay, 0) ||
                    !fast_exact_is_equal(output_layer_bias_decay + bias_decay, 0)))
            penalties.append(affine_transform_weight_penalty(wout, (output_layer_weight_decay + weight_decay), 
                                                             (output_layer_bias_decay + bias_decay), penalty_type));
        if(wdirect && !fast_exact_is_equal(direct_in_to_out_weight_decay + weight_decay, 0))
        {
            if (penalty_type=="L1_square")
                penalties.append(square(sumabs(wdirect))*(direct_in_to_out_weight_decay + weight_decay));
            else if (penalty_type=="L1")
                penalties.append(sumabs(wdirect)*(direct_in_to_out_weight_decay + weight_decay));
            else if (penalty_type=="L2_square")
                penalties.append(sumsquare(wdirect)*(direct_in_to_out_weight_decay + weight_decay));
        }

        // Parameter sharing: alias the storage of every weight Var into the
        // single flat vector paramsvalues, so that saving or loading
        // paramsvalues saves or loads all the weights at once.
        if(paramsvalues.length() == params.nelems())
            params << paramsvalues;
        else
        {
            paramsvalues.resize(params.nelems());
            initializeParams();
        }
        params.makeSharedValue(paramsvalues);

        output->setName("element output");

        f = Func(input, output);

        input_to_logP0 = Func(input, log(1 - output));

        bag_size = Var(1,1);
        bag_inputs = Var(max_n_instances,inputsize());
        bag_output = 1-exp(unfoldedSumOf(bag_inputs,bag_size,input_to_logP0,max_n_instances));
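        // This implements the noisy-OR of the class help: unfoldedSumOf
        // accumulates log P(y_i=0|x_i) over the bag_size instances actually
        // present (up to max_n_instances), so that
        //   exp( sum_i log P(y_i=0|x_i) ) = prod_i P(y_i=0|x_i) = P(Y=0|bag)
        // and bag_output = 1 - P(Y=0|bag) = P(Y=1|bag).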

        costs.resize(3); // (negative log-likelihood, classification error, lift output) for the bag

        costs[0] = cross_entropy(bag_output, target);
        costs[1] = binary_classification_loss(bag_output,target);
        costs[2] = lift_output(bag_output, target);
        test_costs = hconcat(costs);

        // Apply penalty to cost.
        // If there is no penalty, we still add costs[0] as the first cost, in
        // order to keep the same number of costs as if there was a penalty.
        if(penalties.size() != 0) {
            if (weightsize_>0)
                // only multiply by sampleweight if there are weights
                training_cost = hconcat(sampleweight*sum(hconcat(costs[0] & penalties)) // don't weight the lift output
                                        & (costs[0]*sampleweight) & (costs[1]*sampleweight) & costs[2]);
            else {
                training_cost = hconcat(sum(hconcat(costs[0] & penalties)) & test_costs);
            }
        } 
        else {
            if(weightsize_>0) {
                // only multiply by sampleweight if there are weights (but don't weight the lift output)
                training_cost = hconcat(costs[0]*sampleweight & costs[0]*sampleweight & costs[1]*sampleweight & costs[2]);
            } else {
                training_cost = hconcat(costs[0] & test_costs);
            }
        }
      
        training_cost->setName("training_cost");
        test_costs->setName("test_costs");

        if (weightsize_>0)
            invars = bag_inputs & bag_size & target & sampleweight;
        else
            invars = bag_inputs & bag_size & target;

        inputs_and_targets_to_test_costs = Func(invars,test_costs);
        inputs_and_targets_to_training_costs = Func(invars,training_cost);

        inputs_and_targets_to_test_costs->recomputeParents();
        inputs_and_targets_to_training_costs->recomputeParents();

        // AT SOME POINT, target NO LONGER POINTS TO THE SAME PLACE!!!
    }
}

int MultiInstanceNNet::outputsize() const
{ return 1; }

TVec<string> MultiInstanceNNet::getTrainCostNames() const
{
    TVec<string> names(4);
    names[0] = "NLL+penalty";
    names[1] = "NLL";
    names[2] = "class_error";
    names[3] = "lift_output";
    return names;
}

TVec<string> MultiInstanceNNet::getTestCostNames() const
{ 
    TVec<string> names(3);
    names[0] = "NLL";
    names[1] = "class_error";
    names[2] = "lift_output";
    return names;
}


void MultiInstanceNNet::train()
{
    // For MultiInstanceNNet, nstages counts epochs (whole passes through the training set),
    // while the optimizer's nstages counts weight updates.
    // The relationship between the two therefore depends on whether we are in stochastic, batch or minibatch mode.

    if(!train_set)
        PLERROR("In MultiInstanceNNet::train, you did not setTrainingSet");
    
    if(!train_stats)
        PLERROR("In MultiInstanceNNet::train, you did not setTrainStatsCollector");

    if(f.isNull()) // Net has not been properly built yet (because build was called before the learner had a proper training set)
        build();


    if (training_set_has_changed)
    {
        // number of optimizer stages corresponding to one learner stage (one epoch)
        optstage_per_lstage = 0;
        int n_bags = -1;
        if (batch_size<=0)
            optstage_per_lstage = 1;
        else // must count the number of bags in the training set
        {
            n_bags=0;
            int l = train_set->length();
            PP<ProgressBar> pb;
            if(report_progress)
                pb = new ProgressBar("Counting number of bags in train_set for MultiInstanceNNet ", l);
            Vec row(train_set->width());
            int tag_column = train_set->inputsize() + train_set->targetsize() - 1;
            for (int i=0;i<l;i++) {
                train_set->getRow(i,row);
                int tag = (int)row[tag_column];
                if (tag & SumOverBagsVariable::TARGET_COLUMN_FIRST) {
                    // indicates the beginning of a new bag
                    n_bags++;
                }
                if(pb)
                    pb->update(i);
            }
            optstage_per_lstage = n_bags/batch_size;
        }
        training_set_has_changed = false;
    }

    Var totalcost = sumOverBags(train_set, inputs_and_targets_to_training_costs, max_n_instances, batch_size);
    if(optimizer)
    {
        optimizer->setToOptimize(params, totalcost);  
        optimizer->build();
    }


    PP<ProgressBar> pb;
    if(report_progress)
        pb = new ProgressBar("Training MultiInstanceNNet from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);

    int initial_stage = stage;
    bool early_stop=false;
    while(stage<nstages && !early_stop)
    {
        optimizer->nstages = optstage_per_lstage;
        train_stats->forget();
        optimizer->early_stop = false;
        optimizer->optimizeN(*train_stats);
        train_stats->finalize();
        if(verbosity>2)
            cout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
        ++stage;
        if(pb)
            pb->update(stage-initial_stage);
    }
    if(verbosity>1)
        cout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;

    //if (batch_size==0)
    //  optimizer->verifyGradient(0.001);

    //output_and_target_to_cost->recomputeParents();
    //test_costf->recomputeParents();

    // cerr << "totalcost->value = " << totalcost->value << endl;
    // cout << "Result for benchmark is: " << totalcost->value << endl;
}


void MultiInstanceNNet::computeOutput(const Vec& inputv, Vec& outputv) const
{
    f->fprop(inputv,outputv);
}

///////////////////////////
// computeOutputAndCosts //
///////////////////////////
void MultiInstanceNNet::computeOutputAndCosts(const Vec& inputv, const Vec& targetv, 
                                              Vec& outputv, Vec& costsv) const
{
    f->fprop(inputv,outputv); // this computes the individual P(y_i|x_i); it MAY BE AN UNNECESSARY CALCULATION,
    // since the outputs will be re-computed when doing the fprop below at the end of the bag
    // (but if we want to provide them after each call...). The solution would
    // be to do like in computeCostsFromOutputs, keeping track of the outputs.
    int bag_signal = int(targetv[1]);
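    // bag_signal encodes the row's position within its bag as bit flags,
    // matching the class help: bit 0 (value 1) marks the first row of a bag,
    // bit 1 (value 2) marks the last row; 3 means a single-row bag and
    // 0 an intermediate row.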
    if (bag_signal & 1) // first instance, start counting
        test_bag_size=0;
    bag_inputs->matValue(test_bag_size++) << inputv;
    if (!(bag_signal & 2)) // not reached the last instance
        costsv.fill(MISSING_VALUE);
    else // end of bag, we have a target and we can compute a cost
    {
        bag_size->valuedata[0]=test_bag_size;
        target->valuedata[0] = targetv[0];
        if (weightsize_>0) sampleweight->valuedata[0]=1; // the test weights are known and used higher up
        inputs_and_targets_to_test_costs->fproppath.fprop();
        inputs_and_targets_to_test_costs->outputs.copyTo(costsv);
    }
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void MultiInstanceNNet::computeCostsFromOutputs(const Vec& inputv, const Vec& outputv, 
                                                const Vec& targetv, Vec& costsv) const
{
    instance_logP0.resize(max_n_instances);
    int bag_signal = int(targetv[1]);
    if (bag_signal & 1) // first instance, start counting
        test_bag_size=0;
    instance_logP0[test_bag_size++] = safeflog(1-outputv[0]);
    if (!(bag_signal & 2)) // not reached the last instance
        costsv.fill(MISSING_VALUE);
    else // end of bag, we have a target and we can compute a cost
    {
        instance_logP0.resize(test_bag_size);
        real bag_P0 = safeexp(sum(instance_logP0));
        int classe = int(targetv[0]);
        int predicted_classe = (bag_P0>0.5)?0:1;
        real nll = (classe==0)?-safeflog(bag_P0):-safeflog(1-bag_P0);
        int classification_error = (classe != predicted_classe);
        costsv[0] = nll;
        costsv[1] = classification_error;
        // Add the lift output.
        // Probably not working: it looks like it only takes into account the
        // output for the last instance in the bag.
        PLERROR("In MultiInstanceNNet::computeCostsFromOutputs - Probably "
                "bugged, please check code");
        if (targetv[0] > 0) {
            costsv[2] = outputv[0];
        } else {
            costsv[2] = -outputv[0];
        }
    }
}

//////////////////////
// initializeParams //
//////////////////////
void MultiInstanceNNet::initializeParams()
{
    if (seed_>=0)
        manual_seed(seed_);
    else
        PLearn::seed();

    //real delta = 1./sqrt(inputsize());
    real delta = 1./inputsize();
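    // delta sets the spread of the random normal initialization below; it
    // shrinks with the fan-in of the layer being initialized (1/inputsize
    // here, then 1/nhidden and 1/nhidden2 further down), keeping
    // pre-activations in a reasonable range whatever the layer widths.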
    /*
      if(direct_in_to_out)
      {
      //fill_random_uniform(wdirect->value, -delta, +delta);
      fill_random_normal(wdirect->value, 0, delta);
      //wdirect->matValue(0).clear();
      }
    */
    if(nhidden>0)
    {
        //fill_random_uniform(w1->value, -delta, +delta);
        //delta = 1./sqrt(nhidden);
        fill_random_normal(w1->value, 0, delta);
        if(direct_in_to_out)
        {
            //fill_random_uniform(wdirect->value, -delta, +delta);
            fill_random_normal(wdirect->value, 0, 0.01*delta);
            wdirect->matValue(0).clear();
        }
        delta = 1./nhidden;
        w1->matValue(0).clear();
    }
    if(nhidden2>0)
    {
        //fill_random_uniform(w2->value, -delta, +delta);
        //delta = 1./sqrt(nhidden2);
        fill_random_normal(w2->value, 0, delta);
        delta = 1./nhidden2;
        w2->matValue(0).clear();
    }
    //fill_random_uniform(wout->value, -delta, +delta);
    fill_random_normal(wout->value, 0, delta);
    wout->matValue(0).clear();

    // Reset optimizer
    if(optimizer)
        optimizer->reset();
}

void MultiInstanceNNet::forget()
{
    if (train_set) initializeParams();
    stage = 0;
}

#ifdef __INTEL_COMPILER
#pragma warning(disable:1419)  // Get rid of compiler warning.
#endif
extern void varDeepCopyField(Var& field, CopiesMap& copies);
#ifdef __INTEL_COMPILER
#pragma warning(default:1419)
#endif

void MultiInstanceNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(instance_logP0, copies);
    varDeepCopyField(input, copies);
    varDeepCopyField(target, copies);
    varDeepCopyField(sampleweight, copies);
    varDeepCopyField(w1, copies);
    varDeepCopyField(w2, copies);
    varDeepCopyField(wout, copies);
    varDeepCopyField(wdirect, copies);
    varDeepCopyField(output, copies);
    varDeepCopyField(bag_size, copies);
    varDeepCopyField(bag_inputs, copies);
    varDeepCopyField(bag_output, copies);
    deepCopyField(inputs_and_targets_to_test_costs, copies);
    deepCopyField(inputs_and_targets_to_training_costs, copies);
    deepCopyField(input_to_logP0, copies);
    varDeepCopyField(nll, copies);
    deepCopyField(costs, copies);
    deepCopyField(penalties, copies);
    varDeepCopyField(training_cost, copies);
    varDeepCopyField(test_costs, copies);
    deepCopyField(invars, copies);
    deepCopyField(params, copies);
    deepCopyField(paramsvalues, copies);
    deepCopyField(f, copies);
    deepCopyField(test_costf, copies);
    deepCopyField(output_and_target_to_cost, copies);
    deepCopyField(optimizer, copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :