// -*- C++ -*-

// DeepNNet.cc
//
// Copyright (C) 2005 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: DeepNNet.cc 6508 2006-12-15 02:35:49Z lamblin $
 ******************************************************* */

// Authors: Yoshua Bengio

#include "DeepNNet.h"
#include <time.h>
#include <plearn/base/tostring.h>
#include <plearn/math/random.h>
#include <plearn/math/pl_math.h>

namespace PLearn {
using namespace std;

DeepNNet::DeepNNet()
/* ### Initialize all fields to their default value here */
    :   training_time(0),
        n_layers(3),
        n_outputs(1),
        default_n_units_per_hidden_layer(10),
        L1_regularizer(1e-5),
        initial_learning_rate(1e-4),
        learning_rate_decay(1e-6),
        layerwise_learning_rate_adaptation(0),
        normalize_per_unit(0),
        normalize_percentage(0),
        normalize_activations(0),
        output_cost("mse"),
        add_connections(true),
        remove_connections(true),
        initial_sparsity(0.9),
        connections_adaptation_frequency(0),
        init_scale(1)
{
}

PLEARN_IMPLEMENT_OBJECT(DeepNNet,
                        "Deep multi-layer neural networks with sparse adaptive connections",
                        "This feedforward neural network can have many layers, but its weight\n"
                        "matrices are sparse and can be optionally adapted (adding new connections\n"
                        "where that would create the largest gradient).");

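// A minimal usage sketch (not part of the original source), assuming the generic
// PLearn Object/PLearner interface (setOption / build / setTrainingSet / train /
// computeOutput); option values are illustrative and "train_vmat" / "test_input"
// are placeholders for the user's data:
//
//     PP<DeepNNet> net = new DeepNNet();
//     net->setOption("n_layers", "4");
//     net->setOption("n_outputs", "10");
//     net->setOption("default_n_units_per_hidden_layer", "50");
//     net->setOption("output_cost", "NLL");
//     net->setOption("initial_sparsity", "0.9");
//     net->setOption("nstages", "10");          // number of training epochs
//     net->build();
//     net->setTrainingSet(train_vmat);          // train_vmat: a VMat of (input, target) rows
//     net->train();
//     Vec output(net->outputsize());
//     net->computeOutput(test_input, output);   // test_input: a Vec of size inputsize()
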
void DeepNNet::declareOptions(OptionList& ol)
{
    declareOption(ol, "n_layers", &DeepNNet::n_layers, OptionBase::buildoption,
                  "Number of layers, including the output but not input layer");

    declareOption(ol, "n_outputs", &DeepNNet::n_outputs, OptionBase::buildoption,
                  "Number of units of output layer");

    declareOption(ol, "default_n_units_per_hidden_layer", &DeepNNet::default_n_units_per_hidden_layer,
                  OptionBase::buildoption, "If n_units_per_layer is not specified, it is given by this value for all hidden layers");

    declareOption(ol, "n_units_per_layer", &DeepNNet::n_units_per_layer, OptionBase::buildoption,
                  "Number of units per layer, including the output but not the input layer.\n"
                  "The number of units in the last (output) layer is the output size.");

    declareOption(ol, "L1_regularizer", &DeepNNet::L1_regularizer, OptionBase::buildoption,
                  "amount of penalty on sum_{l,i,j} |weights[l][i][j]|");

    declareOption(ol, "initial_learning_rate", &DeepNNet::initial_learning_rate, OptionBase::buildoption,
                  "learning_rate = initial_learning_rate/(1 + iteration*learning_rate_decay)\n"
                  "where iteration is incremented after each example is presented");

    declareOption(ol, "learning_rate_decay", &DeepNNet::learning_rate_decay, OptionBase::buildoption,
                  "see the comment for initial_learning_rate.");

    declareOption(ol, "layerwise_learning_rate_adaptation", &DeepNNet::layerwise_learning_rate_adaptation,
                  OptionBase::buildoption, "if 0 use stochastic gradient as usual, otherwise correct the\n"
                  "learning rates layerwise by multiplying by the ratio of the average gradient norm\n"
                  "of the top layer to that of the i-th layer, raised to the power layerwise_learning_rate_adaptation.");

    declareOption(ol, "normalize_per_unit", &DeepNNet::normalize_per_unit,
                  OptionBase::buildoption, "Try balancing the norm of the weight gradient vectors per unit, rather than per weight\n");

    declareOption(ol, "normalize_percentage", &DeepNNet::normalize_percentage,
                  OptionBase::buildoption, "Try balancing the ratio of the gradient to the squared weight, rather than the norm of the gradient\n");

    declareOption(ol, "normalize_activations", &DeepNNet::normalize_activations,
                  OptionBase::buildoption, "Try balancing the norm of the gradient on the activations, per layer\n");

    declareOption(ol, "output_cost", &DeepNNet::output_cost, OptionBase::buildoption,
                  "String-valued option specifies output non-linearity and cost:\n"
                  "  'mse': mean squared error for regression with linear outputs\n"
                  "  'NLL': negative log-likelihood of P(class|input) with softmax outputs");

    declareOption(ol, "add_connections", &DeepNNet::add_connections, OptionBase::buildoption,
                  "whether to add connections when the potential connections' average\n"
                  "gradient becomes larger in magnitude than that of existing connections");

    declareOption(ol, "remove_connections", &DeepNNet::remove_connections, OptionBase::buildoption,
                  "whether to remove connections when their weight becomes too small");

    declareOption(ol, "initial_sparsity", &DeepNNet::initial_sparsity, OptionBase::buildoption,
                  "initial fraction of weights that are set to 0.");

    declareOption(ol, "connections_adaptation_frequency", &DeepNNet::connections_adaptation_frequency,
                  OptionBase::buildoption, "after how many examples do we try to adapt connections?\n"
                  "if set to 0, this is interpreted as the training set size.");

    declareOption(ol, "init_scale", &DeepNNet::init_scale, OptionBase::buildoption,
                  "scaling factor of random initial weights range.");

    declareOption(ol, "sources", &DeepNNet::sources, OptionBase::learntoption,
                  "The learned connectivity matrix at each layer\n"
                  "(sources[l][i] = vector of indices of inputs of neuron i at layer l)");

    declareOption(ol, "weights", &DeepNNet::weights, OptionBase::learntoption,
                  "The learned weights at each layer\n"
                  "(weights[l][i] = vector of weights of inputs of neuron i at layer l)");

    declareOption(ol, "biases", &DeepNNet::biases, OptionBase::learntoption,
                  "The learned biases at each layer\n"
                  "(biases[l] = vector of biases of neurons at layer l)");

    declareOption(ol, "layerwise_lr_factor", &DeepNNet::layerwise_lr_factor, OptionBase::learntoption,
                  "The multiplicative factor for the learning rate at each layer");

    declareOption(ol, "layerwise_gradient_norm_ma", &DeepNNet::layerwise_gradient_norm_ma, OptionBase::learntoption,
                  "The (moving) average of squared gradients at each layer");

    declareOption(ol, "training_time", &DeepNNet::training_time, OptionBase::learntoption,
                  "The time spent during training (in seconds)");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void DeepNNet::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning" options have been modified.
    // ### You should assume that the parent class' build_() has already been called.

    // these would be -1 if a train_set has not been set yet
    if (inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
    {
        if (add_connections)
        {
            avg_weight_gradients.resize(n_layers);
            for (int l=0;l<n_layers;l++)
                avg_weight_gradients[l].resize(n_units_per_layer[l+1],n_units_per_layer[l]);
        }

        if (layerwise_learning_rate_adaptation>0)
        {
            layerwise_lr_factor.resize(n_layers);
            layerwise_gradient_norm_ma.resize(n_layers);
            layerwise_gradient_norm.resize(n_layers);
            n_weights_of_layer.resize(n_layers);
            layerwise_lr_factor.fill(1.0);
            layerwise_gradient_norm_ma.clear();
        }
        bool do_initialize = false;

        if (sources.length() != n_layers) // in case we are called after loading the object we don't need to do this:
        {
            if (n_units_per_layer.length()==0)
            {
                n_units_per_layer.resize(n_layers);
                for (int l=0;l<n_layers-1;l++)
                    n_units_per_layer[l] = default_n_units_per_hidden_layer;
                n_units_per_layer[n_layers-1] = n_outputs;
            }
            sources.resize(n_layers);
            weights.resize(n_layers);
            biases.resize(n_layers);
            for (int l=0;l<n_layers;l++)
            {
                sources[l].resize(n_units_per_layer[l]);
                weights[l].resize(n_units_per_layer[l]);
                biases[l].resize(n_units_per_layer[l]);
                int n_previous = (l==0)? int((1-initial_sparsity)*inputsize_) :
                    int((1-initial_sparsity)*n_units_per_layer[l-1]);
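                // e.g. with initial_sparsity = 0.9 and 100 units (or inputs) in the
                // previous layer, this reserves int(0.1*100) = 10 incoming connections
                // per unit; the actual connectivity is drawn in initializeParams()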
                for (int i=0;i<n_units_per_layer[l];i++)
                {
                    sources[l][i].resize(n_previous);
                    weights[l][i].resize(n_previous);
                }
            }
            do_initialize = true;
        }
        activations.resize(n_layers+1);
        activations[0].resize(inputsize_);
        activations_gradients.resize(n_layers);
        for (int l=0;l<n_layers;l++)
        {
            activations[l+1].resize(n_units_per_layer[l]);
            activations_gradients[l].resize(n_units_per_layer[l]);
        }
        if (do_initialize)
            initializeParams();
    }

}

void DeepNNet::initializeParams(bool set_seed)
{
    if (set_seed) {
        if (seed_>=0)
            manual_seed(seed_);
        else
            PLearn::seed();
    }
    for (int l=0;l<n_layers;l++)
    {
        biases[l].clear();
        int n_previous = (l==0)?inputsize_:n_units_per_layer[l-1];
        int n_next = n_units_per_layer[l];
        if (initial_sparsity>0)
        {
            // first randomly assign some connections to each unit of the next layer
            int n_in = 1+int(0.66 * (1-initial_sparsity) * n_previous);
            if (n_in>n_previous) n_in=n_previous;
            int n_out = 1+int(0.66 * (1-initial_sparsity) * n_next);
            if (n_out>n_next) n_out=n_next;
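            // two-pass sparse initialization: first each unit of this layer draws
            // n_in random sources from the previous layer; then each unit of the
            // previous layer is guaranteed a fan-out of at least n_out by appending
            // itself to randomly chosen destination units that do not already use it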
            for (int i=0;i<n_next;i++)
            {
                sources[l][i].resize(n_in);
                random_subset_indices(sources[l][i],n_previous);
            }
            // then randomly assign some connections from each unit of the previous layer
            TVec<int> dest(n_out);
            for (int j=0;j<n_previous;j++)
            {
                random_subset_indices(dest,n_next);
                for (int k=0;k<n_out;k++)
                    if (!sources[l][dest[k]].contains(j))
                        sources[l][dest[k]].append(j);
            }
            for (int i=0;i<n_next;i++)
            {
                int n_in = sources[l][i].length();
                real delta = init_scale/sqrt((real)n_in);
                weights[l][i].resize(n_in);
                if (n_layers==1)
                    weights[l][i].fill(0);
                else
                    fill_random_uniform(weights[l][i],-delta,delta);
            }
        }
        else // fully connected, mostly for debugging
        {
            // real delta = 1.0/sqrt((real)n_previous);
            real delta = init_scale/n_previous;
            for (int i=0;i<n_next;i++)
            {
                sources[l][i].resize(n_previous);
                weights[l][i].resize(n_previous);
                for (int j=0;j<n_previous;j++)
                    sources[l][i][j] = j;
                fill_random_uniform(weights[l][i],-delta,delta);
            }
        }
        if (layerwise_learning_rate_adaptation>0)
        {
            n_weights_of_layer[l]=0;
            for (int i=0;i<n_next;i++)
                n_weights_of_layer[l] += sources[l][i].length();
        }
    }
}

// ### Nothing to add here, simply calls build_
void DeepNNet::build()
{
    inherited::build();
    build_();
}


void DeepNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(sources, copies);
    deepCopyField(weights, copies);
    deepCopyField(biases, copies);
    deepCopyField(layerwise_lr_factor, copies);
    deepCopyField(activations, copies);
    deepCopyField(activations_gradients, copies);
    deepCopyField(avg_weight_gradients, copies);
    deepCopyField(layerwise_gradient_norm_ma, copies);
    deepCopyField(layerwise_gradient_norm, copies);
    deepCopyField(n_weights_of_layer, copies);
    deepCopyField(n_units_per_layer, copies);
    deepCopyField(output_cost, copies);
}


int DeepNNet::outputsize() const
{
    return n_units_per_layer[n_units_per_layer.length()-1];
}

void DeepNNet::forget()
{
    if (train_set) initializeParams();
    stage = 0;
    training_time = 0;
}

void DeepNNet::fprop() const
{
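    // forward propagation through the sparse layers: each unit i of layer l+1
    // computes s = bias + sum_k weights[l][i][k] * activations[l][sources[l][i][k]],
    // applies tanh for hidden layers and leaves the output layer linear
    // (a softmax is applied below when output_cost == "NLL")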
    for (int l=0;l<n_layers;l++)
    {
        int n_u = n_units_per_layer[l];
        Vec biases_l = biases[l];
        Vec previous_layer = activations[l];
        Vec next_layer = activations[l+1];
        for (int i=0;i<n_u;i++)
        {
            TVec<int> sources_i = sources[l][i];
            Vec weights_i = weights[l][i];
            int n_sources = sources_i.length();
            real s=biases_l[i];
            for (int k=0;k<n_sources;k++)
                s += previous_layer[sources_i[k]] * weights_i[k];
            if (l+1<n_layers)
                next_layer[i] = tanh(s);
            else next_layer[i] = s;
        }
    }
    if (output_cost == "NLL")
    {
        Vec output = activations[n_layers];
        softmax(output,output);
    }
}

void DeepNNet::train()
{
    // The role of the train method is to bring the learner up to stage==nstages,
    // updating train_stats with training costs measured on-line in the process.
    clock_t start_train = clock();
    static Vec target;
    static Vec train_costs;
    target.resize(targetsize());
    if (output_cost=="mse")
        train_costs.resize(1);
    else
        train_costs.resize(2);
    real example_weight;

    if(!train_stats)  // make a default stats collector, in case there's none
        train_stats = new VecStatsCollector();

    if(nstages<stage) // asking to revert to a previous stage!
        forget();  // reset the learner to stage=0
    int initial_stage = stage;

    PP<ProgressBar> pb;
    if (report_progress) {
        pb = new ProgressBar("Training " + classname() + " from stage " + tostring(stage) + " to " + tostring(nstages), nstages - stage);
    }

    int n_examples = train_set->length();

    int t=stage*n_examples;

    while(stage<nstages)
    {
        // clear statistics of previous epoch
        train_stats->forget();

        if (layerwise_learning_rate_adaptation>0)
            layerwise_gradient_norm.clear();

        // train for 1 stage, and update train_stats,
        for (int ex=0;ex<n_examples;ex++, t++)
        {
            // get the (input,target) pair
            train_set->getExample(ex, activations[0], target, example_weight);

            // fprop
            fprop();

            // compute cost

            if (output_cost == "mse")
            {
                substract(activations[n_layers],target,activations_gradients[n_layers-1]);
                train_costs[0] = example_weight*pownorm(activations_gradients[n_layers-1]);
                activations_gradients[n_layers-1] *= 2*example_weight; // 2 from the square
            }
            else if (output_cost == "NLL")
            {
                Vec output = activations[n_layers];
                int target_class = int(target[0]);
                real p_target = output[target_class];
                train_costs[0] = example_weight*(-safelog(p_target));
                int recognized_class = argmax(output);
                train_costs[1] = example_weight*(recognized_class!=target_class);
                activations_gradients[n_layers-1] << output;
                activations_gradients[n_layers-1][target_class] -= 1;
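                // gradient of the NLL with respect to the (pre-softmax) output
                // activations: dC/da_i = p_i - 1_{i == target_class}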
            }
            else PLERROR("DeepNNet: unknown output_cost = %s, expected mse or NLL",output_cost.c_str());

            // bprop + update + track avg gradient

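            // annealed learning rate: lr(t) = initial_learning_rate / (1 + t*learning_rate_decay),
            // where t counts the examples seen since stage 0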
            learning_rate = initial_learning_rate / (1 + t*learning_rate_decay);

            if (layerwise_learning_rate_adaptation>0 && normalize_activations)
            {
                int l=n_layers-1;
                Vec ag = activations_gradients[n_layers-1];
                real& gn = layerwise_gradient_norm[l];
                for (int i=0;i<n_outputs;i++)
                {
                    real g = ag[i];
                    gn += g*g;
                }
            }
            for (int l=n_layers-1;l>=0;l--)
            {
                Vec biases_l = biases[l];
                Vec next_layer = activations[l+1];
                Vec previous_layer = activations[l];
                int n_next = next_layer.length();
                int n_previous = previous_layer.length();
                Vec next_layer_gradient = activations_gradients[l];
                Vec previous_layer_gradient;
                if (l>0)
                {
                    previous_layer_gradient = activations_gradients[l-1];
                    previous_layer_gradient.clear();
                }
                real layer_learning_rate = learning_rate;
                if (layerwise_learning_rate_adaptation>0)
                    layer_learning_rate *= layerwise_lr_factor[l];

                for (int i=0;i<n_next;i++)
                {
                    TVec<int> sources_i = sources[l][i];
                    Vec weights_i = weights[l][i];
                    int n_sources = sources_i.length();
                    real g_i = next_layer_gradient[i];
                    biases_l[i] -= learning_rate * g_i;
                    for (int k=0;k<n_sources;k++)
                    {
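                        // per-connection SGD step with an L1 penalty:
                        //   w <- w - layer_learning_rate * (dC/dw + L1_regularizer*sign(w));
                        // the same pass backpropagates g_i*w to the previous layer and,
                        // when enabled, accumulates squared-gradient statistics for the
                        // layerwise learning rate adaptation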
                        real w = weights_i[k];
                        int j=sources_i[k];
                        real sign_w = (w>0)?1:-1;
                        real grad = g_i * previous_layer[j];
                        weights_i[k] -= layer_learning_rate * (grad + L1_regularizer*sign_w);
                        if (l>0)   // THE IF COULD BE FACTORED OUT (more ugly but more efficient)
                            previous_layer_gradient[j] += g_i * w;
                        if (layerwise_learning_rate_adaptation>0 && !normalize_activations)  // THE IF COULD BE FACTORED OUT (more ugly but more efficient)
                        {
                            if (normalize_percentage)
                                layerwise_gradient_norm[l] += grad*grad/(w*w);
                            else
                                layerwise_gradient_norm[l] += grad*grad;
                        }
                    }
                }
                if (l>0)
                    for (int j=0;j<n_previous;j++)
                    {
                        real a = previous_layer[j];
                        real& g = previous_layer_gradient[j];
                        g *= (1 - a*a);
                        if (layerwise_learning_rate_adaptation>0 && normalize_activations)
                            layerwise_gradient_norm[l-1] += g*g;
                    }
            }
            if (layerwise_learning_rate_adaptation>0)
            {
                for (int l=0;l<n_layers;l++)
                {
                    if (normalize_activations || normalize_per_unit)
                        layerwise_gradient_norm[l] /= n_units_per_layer[l]; // maybe we want larger weights, hence larger gradients where there are fewer terms in the sum, i.e. fewer weights
                    else // normalize per weight
                        layerwise_gradient_norm[l] /= n_weights_of_layer[l];
                    layerwise_gradient_norm_ma[l] = (1-learning_rate) * layerwise_gradient_norm_ma[l] + learning_rate * layerwise_gradient_norm[l];
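                    // layers whose (moving-average) gradient norm is smaller than the top
                    // layer's get a proportionally larger learning rate; the exponent
                    // 0.5*layerwise_learning_rate_adaptation sets the strength of the correction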
                    layerwise_lr_factor[l] = pow(layerwise_gradient_norm_ma[n_layers-1]/layerwise_gradient_norm_ma[l],
                                                 0.5*layerwise_learning_rate_adaptation);
                }
            }
            train_stats->update(train_costs);
        }

        ++stage;
        train_stats->finalize(); // finalize statistics for this epoch
        if (report_progress)
            pb->update(stage - initial_stage);
    }
    training_time += real(clock() - start_train) / real(CLOCKS_PER_SEC);
}


void DeepNNet::computeOutput(const Vec& input, Vec& output) const
{
    output.resize(outputsize());
    activations[0] << input;
    fprop();
    output << activations[n_layers];
}

void DeepNNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                       const Vec& target, Vec& costs) const
{
    costs.resize(0);
    if (output_cost == "mse")
    {
        costs.append(powdistance(output,target));
    }
    else if (output_cost == "NLL")
    {
        int target_class = int(target[0]);
        real p_target = output[target_class];
        costs.append(-safelog(p_target));
        int recognized_class = argmax(output);
        costs.append(recognized_class!=target_class);
    }
    else PLERROR("DeepNNet: unknown output_cost = %s, expected mse or NLL",output_cost.c_str());
    costs.append(real(nstages));
    costs.append(training_time);
}

TVec<string> DeepNNet::getTestCostNames() const
{
    TVec<string> names;
    if (output_cost == "mse")
    {
        names.append("mse");
    } else // "NLL"
    {
        names.append("NLL");
        names.append("class_error");
    }
    names.append("nstages");
    names.append("training_time");
    return names;
}

TVec<string> DeepNNet::getTrainCostNames() const
{
    return getTestCostNames();
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :