// -*- C++ -*-

// mNNet.cc
//
// Copyright (C) 2007 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Yoshua Bengio, PAM

#include "mNNet.h"
//#include <plearn/math/pl_erf.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    mNNet,
    "Multi-layer neural network based on matrix-matrix multiplications",
    "This is a LEAN neural network. No bells, no whistles.\n"
    );

mNNet::mNNet()
    : noutputs(-1),
      init_lrate(0.0),
      lrate_decay(0.0),
      minibatch_size(1),
      output_type("NLL"),
      output_layer_L1_penalty_factor(0.0),
      n_layers(-1),
      cumulative_training_time(0.0)
{
    random_gen = new PRandom();
}

void mNNet::declareOptions(OptionList& ol)
{
    declareOption(ol, "noutputs", &mNNet::noutputs,
                  OptionBase::buildoption,
                  "Number of outputs of the neural network, which can be derived from output_type and targetsize_\n");

    declareOption(ol, "hidden_layer_sizes", &mNNet::hidden_layer_sizes,
                  OptionBase::buildoption,
                  "Defines the architecture of the multi-layer neural network by\n"
                  "specifying the number of hidden units in each hidden layer.\n");

    declareOption(ol, "init_lrate", &mNNet::init_lrate,
                  OptionBase::buildoption,
                  "Initial learning rate\n");

    declareOption(ol, "lrate_decay", &mNNet::lrate_decay,
                  OptionBase::buildoption,
                  "Learning rate decay factor\n");

    // TODO Why this dependence on test_minibatch_size?
    declareOption(ol, "minibatch_size", &mNNet::minibatch_size,
                  OptionBase::buildoption,
                  "Update the parameters only so often (number of examples).\n"
                  "Must be greater than or equal to test_minibatch_size\n");

    declareOption(ol, "output_type",
                  &mNNet::output_type,
                  OptionBase::buildoption,
                  "Type of output cost: 'cross_entropy' for binary classification,\n"
                  "'NLL' for classification problems, or 'MSE' for regression.\n");

    declareOption(ol, "output_layer_L1_penalty_factor",
                  &mNNet::output_layer_L1_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of the L1 regularization term, i.e.\n"
                  "minimize L1_penalty_factor * sum_{ij} |weights(i,j)| during training.\n"
                  "Gets multiplied by the learning rate. Applies only to the output layer.\n");

    declareOption(ol, "n_layers", &mNNet::n_layers,
                  OptionBase::learntoption,
                  "Number of layers of weights plus 1 (i.e. 3 for a neural net with one hidden layer).\n"
                  "Need not be specified explicitly (derived from hidden_layer_sizes).\n");

    declareOption(ol, "layer_sizes", &mNNet::layer_sizes,
                  OptionBase::learntoption,
                  "Derived from hidden_layer_sizes, inputsize_ and noutputs\n");

    declareOption(ol, "layer_params", &mNNet::layer_params,
                  OptionBase::learntoption,
                  "Parameters used while training, for each layer, organized as follows: layer_params[i] \n"
                  "is a matrix of dimension layer_sizes[i+1] x (layer_sizes[i]+1)\n"
                  "containing the neuron biases in its first column.\n");

    declareOption(ol, "cumulative_training_time", &mNNet::cumulative_training_time,
                  OptionBase::learntoption,
                  "Cumulative training time since age=0, in seconds.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

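// As an illustration of the topology that build_() derives below (hypothetical
// values, not defaults): with inputsize_ = 10, hidden_layer_sizes = [50, 20],
// noutputs = 3 and output_type = "NLL", one gets layer_sizes = [10, 50, 20, 3]
// (n_layers = 4) and the learnt matrices
//     layer_params[0] : 50 x 11   (one bias column + 10 input weights per unit)
//     layer_params[1] : 20 x 51
//     layer_params[2] :  3 x 21
// i.e. 50*11 + 20*51 + 3*21 = 1633 entries in all_params.
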
// TODO - reloading an object will not work! layer_params will just get lost.
void mNNet::build_()
{
    // *** Sanity checks ***

    if (!train_set)
        return;
    if (output_type=="MSE")
    {
        if (noutputs<0) noutputs = targetsize_;
        else PLASSERT_MSG(noutputs==targetsize_,"mNNet: noutputs should be -1 or match data's targetsize");
    }
    else if (output_type=="NLL")
    {
        // TODO add a check on the value of noutputs
        if (noutputs<0)
            PLERROR("mNNet: if output_type=NLL (classification), one \n"
                    "should provide noutputs = number of classes, or possibly\n"
                    "1 when there are 2 classes\n");
    }
    else if (output_type=="cross_entropy")
    {
        if(noutputs!=1)
            PLERROR("mNNet: if output_type=cross_entropy, then \n"
                    "noutputs should be 1.\n");
    }
    else PLERROR("mNNet: output_type should be cross_entropy, NLL or MSE\n");

    if( output_layer_L1_penalty_factor < 0. )
        PLWARNING("mNNet::build_ - output_layer_L1_penalty_factor is negative!\n");

    // *** Determine topology ***
    inputsize_ = train_set->inputsize();
    while (hidden_layer_sizes.length()>0 && hidden_layer_sizes[hidden_layer_sizes.length()-1]==0)
        hidden_layer_sizes.resize(hidden_layer_sizes.length()-1);
    n_layers = hidden_layer_sizes.length()+2;
    layer_sizes.resize(n_layers);
    layer_sizes.subVec(1,n_layers-2) << hidden_layer_sizes;
    layer_sizes[0]=inputsize_;
    layer_sizes[n_layers-1]=noutputs;

    // *** Allocate memory for params and gradients ***
    int n_params=0;
    int n_neurons=0;
    for (int i=0;i<n_layers-1;i++)    {
        n_neurons+=layer_sizes[i+1];
        n_params+=layer_sizes[i+1]*(1+layer_sizes[i]);
    }
    all_params.resize(n_params);
    all_params_gradient.resize(n_params);

    // *** Set handles ***
    layer_params.resize(n_layers-1);
    layer_params_gradient.resize(n_layers-1);
    biases.resize(n_layers-1);
    weights.resize(n_layers-1);

    for (int i=0,p=0;i<n_layers-1;i++)    {
        int np=layer_sizes[i+1]*(1+layer_sizes[i]);
        layer_params[i]=all_params.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
        biases[i]=layer_params[i].subMatColumns(0,1);
        weights[i]=layer_params[i].subMatColumns(1,layer_sizes[i]); // weights[0] from layer 0 to layer 1
        layer_params_gradient[i]=all_params_gradient.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
        p+=np;
    }

    // *** Allocate memory for outputs and gradients on neurons ***
    neuron_extended_outputs.resize(minibatch_size,layer_sizes[0]+1+n_neurons+n_layers);
    neuron_gradients.resize(minibatch_size,n_neurons);

    // *** Set handles and biases ***
    neuron_outputs_per_layer.resize(n_layers); // layer 0 = input, layer n_layers-1 = output
    neuron_extended_outputs_per_layer.resize(n_layers); // layer 0 = input, layer n_layers-1 = output
    neuron_gradients_per_layer.resize(n_layers); // layer 0 not used

    int k=0, kk=0;
    for (int i=0;i<n_layers;i++)
    {
        neuron_extended_outputs_per_layer[i] = neuron_extended_outputs.subMatColumns(k,1+layer_sizes[i]);
        neuron_extended_outputs_per_layer[i].column(0).fill(1.0); // for biases
        neuron_outputs_per_layer[i]=neuron_extended_outputs_per_layer[i].subMatColumns(1,layer_sizes[i]);
        k+=1+layer_sizes[i];
        if(i>0) {
            neuron_gradients_per_layer[i] = neuron_gradients.subMatColumns(kk,layer_sizes[i]);
            kk+=layer_sizes[i];
        }
    }

    Profiler::activate();

}
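
// Note on the buffers set up in build_(): each row of neuron_extended_outputs
// holds one example of the minibatch, laid out as consecutive per-layer blocks
//     [ 1 | layer 0 (input) | 1 | layer 1 | ... | 1 | layer n_layers-1 (output) ]
// where the constant leading 1 of each block feeds the biases. The
// neuron_extended_outputs_per_layer[i] and neuron_outputs_per_layer[i] matrices
// are sub-matrix views into this buffer, so fpropNet() can multiply a layer's
// extended output directly with layer_params[i] without any copying.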

// ### Nothing to add here, simply calls build_
void mNNet::build()
{
    inherited::build();
    build_();
}


void mNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(hidden_layer_sizes, copies);
    deepCopyField(layer_sizes, copies);
    deepCopyField(all_params, copies);
    deepCopyField(biases, copies);
    deepCopyField(weights, copies);
    deepCopyField(layer_params, copies);
    deepCopyField(all_params_gradient, copies);
    deepCopyField(layer_params_gradient, copies);
    deepCopyField(neuron_gradients, copies);
    deepCopyField(neuron_gradients_per_layer, copies);
    deepCopyField(neuron_extended_outputs, copies);
    deepCopyField(neuron_extended_outputs_per_layer, copies);
    deepCopyField(neuron_outputs_per_layer, copies);
    deepCopyField(targets, copies);
    deepCopyField(example_weights, copies);
    deepCopyField(train_costs, copies);
}


int mNNet::outputsize() const
{
    return noutputs;
}

void mNNet::forget()
{
    inherited::forget();
    for (int i=0;i<n_layers-1;i++)
    {
        real delta = 1/sqrt(real(layer_sizes[i]));
        random_gen->fill_random_uniform(weights[i],-delta,delta);
        biases[i].clear();
    }
    stage = 0;
    cumulative_training_time=0.0;
}
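
// Note: forget() re-initializes each weight matrix with a fan-in scaled uniform
// draw, weights[i] ~ U(-1/sqrt(layer_sizes[i]), 1/sqrt(layer_sizes[i])), and
// clears the biases, which keeps the initial pre-activations on the order of 1
// for inputs of roughly unit scale.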

void mNNet::train()
{

    if (inputsize_<0)
        build();
    if(!train_set)
        PLERROR("In mNNet::train, you did not setTrainingSet");
    if(!train_stats)
        setTrainStatsCollector(new VecStatsCollector());

    targets.resize(minibatch_size,targetsize());  // the train_set's targetsize()
    example_weights.resize(minibatch_size);

    TVec<string> train_cost_names = getTrainCostNames() ;
    train_costs.resize(minibatch_size,train_cost_names.length()-2);
    train_costs.fill(MISSING_VALUE) ;
    Vec costs_plus_time(train_costs.width()+2);
    costs_plus_time[train_costs.width()] = MISSING_VALUE;
    costs_plus_time[train_costs.width()+1] = MISSING_VALUE;
    Vec costs = costs_plus_time.subVec(0,train_costs.width());

    train_stats->forget();

    int b, sample, nsamples;
    nsamples = train_set->length();
    Vec input,target;   // TODO discard these variables.

    Profiler::reset("training");
    Profiler::start("training");

    for( ; stage<nstages; stage++)
    {
        sample = stage % nsamples;
        b = stage % minibatch_size;
        input = neuron_outputs_per_layer[0](b);
        target = targets(b);
        train_set->getExample(sample, input, target, example_weights[b]);
        if (b+1==minibatch_size) // TODO also handle the special end-case: add || stage+1==nstages
        {
            onlineStep(stage, targets, train_costs, example_weights );
            for (int i=0;i<minibatch_size;i++)  {
                costs << train_costs(i);    // TODO Is the copy necessary? Might be
                                            // better to waste some memory in
                                            // train_costs instead
                train_stats->update( costs_plus_time );
            }
        }
    }

    Profiler::end("training");
    if (verbosity>0)
        Profiler::report(cout);
    // Take care of the timing stats.
    const Profiler::Stats& stats = Profiler::getStats("training");
    costs.fill(MISSING_VALUE);
    real ticksPerSec = Profiler::ticksPerSecond();
    real cpu_time = (stats.user_duration+stats.system_duration)/ticksPerSec;
    cumulative_training_time += cpu_time;
    costs_plus_time[train_costs.width()] = cpu_time;
    costs_plus_time[train_costs.width()+1] = cumulative_training_time;
    train_stats->update( costs_plus_time );
    train_stats->finalize(); // finalize statistics for this epoch
}
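
// Note on the loop in train(): each stage consumes one example; example
// (stage % nsamples) is copied into row (stage % minibatch_size) of the input
// and target buffers, and onlineStep() (fprop + loss + update) is called only
// when that row index wraps around, i.e. once every minibatch_size stages.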

void mNNet::onlineStep(int t, const Mat& targets,
                             Mat& train_costs, Vec example_weights)
{
    PLASSERT(targets.length()==minibatch_size && train_costs.length()==minibatch_size && example_weights.length()==minibatch_size);

    fpropNet(minibatch_size);
    fbpropLoss(neuron_outputs_per_layer[n_layers-1],targets,example_weights,train_costs);
    bpropUpdateNet(t);

    l1regularizeOutputs();
}

void mNNet::computeOutput(const Vec& input, Vec& output) const
{
    neuron_outputs_per_layer[0](0) << input;
    fpropNet(1);
    output << neuron_outputs_per_layer[n_layers-1](0);
}

void mNNet::fpropNet(int n_examples) const
{
    PLASSERT_MSG(n_examples<=minibatch_size,"mNNet::fpropNet: nb input vectors treated should be <= minibatch_size\n");
    for (int i=0;i<n_layers-1;i++)
    {
        Mat prev_layer = neuron_extended_outputs_per_layer[i];
        Mat next_layer = neuron_outputs_per_layer[i+1];
        if (n_examples!=minibatch_size) {
            prev_layer = prev_layer.subMatRows(0,n_examples);
            next_layer = next_layer.subMatRows(0,n_examples);
        }

        // try to use BLAS for the expensive operation
        productScaleAcc(next_layer, prev_layer, false, layer_params[i], true, 1, 0);

        // compute layer's output non-linearity
        if (i+1<n_layers-1) {
            for (int k=0;k<n_examples;k++)  {
                Vec L=next_layer(k);
                compute_tanh(L,L);
            }
        }   else if (output_type=="NLL")    {
            for (int k=0;k<n_examples;k++)  {
                Vec L=next_layer(k);
                log_softmax(L,L);
            }
        }   else if (output_type=="cross_entropy")  {
            for (int k=0;k<n_examples;k++)  {
                Vec L=next_layer(k);
                log_sigmoid(L,L);
            }
        }
    }
}
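
// In matrix form, writing X_i for the extended outputs of layer i over the
// minibatch (one example per row, with a leading 1 for the bias), fpropNet()
// computes for each layer
//     A_{i+1} = X_i * layer_params[i]'      (the productScaleAcc call)
//     X_{i+1} = tanh(A_{i+1})               for hidden layers,
// and at the output layer applies log_softmax (output_type=="NLL"),
// log_sigmoid (output_type=="cross_entropy") or nothing (output_type=="MSE").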

void mNNet::fbpropLoss(const Mat& output, const Mat& target, const Vec& example_weight, Mat& costs) const
{
    int n_examples = output.length();
    Mat out_grad = neuron_gradients_per_layer[n_layers-1];
    if (n_examples!=minibatch_size)
        out_grad = out_grad.subMatRows(0,n_examples);
    int target_class;
    Vec outp, grad;
    if (output_type=="NLL") {
        for (int i=0;i<n_examples;i++)  {
            target_class = int(round(target(i,0)));
            #ifdef BOUNDCHECK
            if(target_class>=noutputs)
                PLERROR("In mNNet::fbpropLoss one target value %d is higher than allowed by noutputs %d",
                        target_class, noutputs);
            #endif
            outp = output(i);
            grad = out_grad(i);
            exp(outp,grad); // map log-prob to prob
            costs(i,0) = -outp[target_class];
            costs(i,1) = (target_class == argmax(outp))?0:1;
            grad[target_class]-=1;
            if (example_weight[i]!=1.0)
                costs(i,0) *= example_weight[i];
        }
    }
    else if(output_type=="cross_entropy")   {
        for (int i=0;i<n_examples;i++)  {
            target_class = int(round(target(i,0)));
            outp = output(i);
            grad = out_grad(i);
            exp(outp,grad); // map log-prob to prob
            if( target_class == 1 ) {
                costs(i,0) = - outp[0];
                costs(i,1) = (grad[0]>0.5)?0:1;
            }   else    {
                costs(i,0) = - pl_log( 1.0 - grad[0] );
                costs(i,1) = (grad[0]>0.5)?1:0;
            }
            grad[0] -= (real)target_class; // ?
            if (example_weight[i]!=1.0)
                costs(i,0) *= example_weight[i];
        }
    }
    else // if (output_type=="MSE")
    {
        substract(output,target,out_grad);
        for (int i=0;i<n_examples;i++)  {
            costs(i,0) = pownorm(out_grad(i));
            if (example_weight[i]!=1.0) {
                out_grad(i) *= example_weight[i];
                costs(i,0) *= example_weight[i];
            }
        }
    }
}
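
// The NLL branch of fbpropLoss() uses the standard softmax/NLL identity: with
// o = log p (the log-probabilities from log_softmax) and target class c, the
// cost is -o[c] and its gradient w.r.t. the output pre-activations is
//     d(-log p[c]) / d a[k] = p[k] - 1_{k==c},
// hence the gradient row is first set to exp(o) = p and 1 is then subtracted at
// the target class. The cross_entropy branch is the same identity for a single
// sigmoid output: gradient = p - target.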

void mNNet::bpropUpdateNet(int t)
{
    // The mean gradient over minibatch_size examples has less variance, so we
    // can afford a larger learning rate (divide by sqrt(minibatch_size)
    // instead of minibatch_size).
    real lrate = init_lrate/(1 + t*lrate_decay);
    lrate /= sqrt(real(minibatch_size));

    for (int i=n_layers-1;i>0;i--)  {
        // here neuron_gradients_per_layer[i] contains the gradient on
        // activations (weighted sums)
        //      (minibatch_size x layer_size[i])
        Mat previous_neurons_gradient = neuron_gradients_per_layer[i-1];
        Mat next_neurons_gradient = neuron_gradients_per_layer[i];
        Mat previous_neurons_output = neuron_outputs_per_layer[i-1];

        if (i>1) // if not first hidden layer then compute gradient on previous layer
        {
            // propagate gradients
            productScaleAcc(previous_neurons_gradient,next_neurons_gradient,false,
                            weights[i-1],false,1,0);
            // propagate through tanh non-linearity
            // TODO IN NEED OF OPTIMIZATION
            for (int j=0;j<previous_neurons_gradient.length();j++)  {
                real* grad = previous_neurons_gradient[j];
                real* out = previous_neurons_output[j];
                for (int k=0;k<previous_neurons_gradient.width();k++,out++)
                    grad[k] *= (1 - *out * *out); // gradient through tanh derivative
            }
        }
        // compute gradient on parameters and update them in one go (more
        // efficient)
        productScaleAcc(layer_params[i-1],next_neurons_gradient,true,
                            neuron_extended_outputs_per_layer[i-1],false,
                            -lrate,1);
    }
}
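
// Summary of bpropUpdateNet(), with effective learning rate
//     lrate_t = init_lrate / ((1 + t*lrate_decay) * sqrt(minibatch_size)):
// the gradient w.r.t. layer i's outputs is obtained as G_{i+1} * weights[i]
// (where G_{i+1} is the gradient on layer i+1's activations), then multiplied
// elementwise by the tanh derivative (1 - output^2) to become the gradient G_i
// on layer i's activations; the parameters are updated in a single BLAS call,
//     layer_params[i] -= lrate_t * G_{i+1}' * X_i,
// where X_i is the extended (bias-augmented) output of layer i on the minibatch.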

void mNNet::bpropNet(int t)
{
    for (int i=n_layers-1;i>0;i--)  {
        // here neuron_gradients_per_layer[i] contains the gradient on
        // activations (weighted sums)
        //      (minibatch_size x layer_size[i])
        Mat previous_neurons_gradient = neuron_gradients_per_layer[i-1];
        Mat next_neurons_gradient = neuron_gradients_per_layer[i];
        Mat previous_neurons_output = neuron_outputs_per_layer[i-1];

        if (i>1) // if not first hidden layer then compute gradient on previous layer
        {
            // propagate gradients
            productScaleAcc(previous_neurons_gradient,next_neurons_gradient,false,
                            weights[i-1],false,1,0);
            // propagate through tanh non-linearity
            // TODO IN NEED OF OPTIMIZATION
            for (int j=0;j<previous_neurons_gradient.length();j++)  {
                real* grad = previous_neurons_gradient[j];
                real* out = previous_neurons_output[j];
                for (int k=0;k<previous_neurons_gradient.width();k++,out++)
                    grad[k] *= (1 - *out * *out); // gradient through tanh derivative
            }
        }
        // compute gradient on parameters
        productScaleAcc(layer_params_gradient[i-1],next_neurons_gradient,true,
                            neuron_extended_outputs_per_layer[i-1],false,
                            1,0);
    }
}

void mNNet::l1regularizeOutputs()
{
    // Use the same scaling of the learning rate as in bpropUpdateNet:
    // divide by sqrt(minibatch_size) rather than minibatch_size.
    real lrate = init_lrate/(1 + stage*lrate_decay);
    lrate /= sqrt(real(minibatch_size));

    // Output layer L1 regularization
    if( output_layer_L1_penalty_factor != 0. )    {
        real L1_delta = lrate * output_layer_L1_penalty_factor;
        real* m_i = layer_params[n_layers-2].data();
        for(int i=0; i<layer_params[n_layers-2].length();i++,m_i+=layer_params[n_layers-2].mod())  {
            for(int j=0; j<layer_params[n_layers-2].width(); j++)   {
                if( m_i[j] > L1_delta )
                    m_i[j] -= L1_delta;
                else if( m_i[j] < -L1_delta )
                    m_i[j] += L1_delta;
                else
                    m_i[j] = 0.;
            }
        }
    }
}
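
// The loop in l1regularizeOutputs() is a soft-thresholding (proximal) step on
// the output-layer parameters: each entry is shrunk towards zero by
// L1_delta = lrate * output_layer_L1_penalty_factor,
//     w <- sign(w) * max(|w| - L1_delta, 0),
// which corresponds to the L1 penalty described for that option.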

void mNNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{
    Vec w(1);
    w[0]=1;
    Mat outputM = output.toMat(1,output.length());
    Mat targetM = target.toMat(1,target.length());
    Mat costsM = costs.toMat(1,costs.length());
    fbpropLoss(outputM,targetM,w,costsM);
}

void mNNet::computeOutputs(const Mat& input, Mat& output) const
{
    PLASSERT(test_minibatch_size<=minibatch_size);
    neuron_outputs_per_layer[0].subMat(0,0,input.length(),input.width()) << input;
    fpropNet(input.length());
    output << neuron_outputs_per_layer[n_layers-1].subMat(0,0,output.length(),output.width());
}
void mNNet::computeOutputsAndCosts(const Mat& input, const Mat& target,
                                      Mat& output, Mat& costs) const
{//TODO
    int n=input.length();
    PLASSERT(target.length()==n);
    output.resize(n,outputsize());
    costs.resize(n,nTestCosts());
    computeOutputs(input,output);

    Vec w(n);
    w.fill(1);
    fbpropLoss(output,target,w,costs);
}

TVec<string> mNNet::getTestCostNames() const
{
    TVec<string> costs;
    if (output_type=="NLL")
    {
        costs.resize(3);
        costs[0]="NLL";
        costs[1]="class_error";
    }
    else if (output_type=="cross_entropy")  {
        costs.resize(3);
        costs[0]="cross_entropy";
        costs[1]="class_error";
    }
    else if (output_type=="MSE")
    {
        costs.resize(1);
        costs[0]="MSE";
    }
    return costs;
}

TVec<string> mNNet::getTrainCostNames() const
{
    TVec<string> costs = getTestCostNames();
    costs.append("train_seconds");
    costs.append("cum_train_seconds");
    return costs;
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :