IncrementalNNet.cc
00001 // -*- C++ -*-
00002 
00003 // IncrementalNNet.cc
00004 //
00005 // Copyright (C) 2005 Yoshua Bengio, Mantas Lukosevicius 
00006 // 
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 // 
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 // 
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 // 
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 // 
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 // 
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 /* *******************************************************      
00036  * $Id: IncrementalNNet.cc 3994 2005-08-25 13:35:03Z chapados $ 
00037  ******************************************************* */
00038 
00039 // Authors: Yoshua Bengio & Mantas Lukosevicius
00040 
00044 #include "IncrementalNNet.h"
00045 
00046 namespace PLearn {
00047 using namespace std;
00048 
00049 IncrementalNNet::IncrementalNNet() 
00050     : internal_weights(0),
00051       internal_weight_gradients(0),
00052       candidate_unit_bias(0),
00053       n_examples_seen(0),
00054       current_average_cost(0),
00055       next_average_cost(0),
00056       n_examples_training_candidate(0),
00057       current_example(0),
00058       n_outputs(1),
00059       output_weight_decay(0),
00060       online(true),
00061       minibatch_size(0),
00062       output_cost_type("squared_error"),
00063       boosting(false),
00064       minimize_local_cost(false),
00065       hard_activation_function(false),
00066       use_hinge_loss_for_hard_activation(true),
00067       initial_learning_rate(0.01),
00068       decay_factor(1e-6),
00069       max_n_epochs_to_fail(1),
00070       rand_range(1),
00071       enable_internal_weights(false),
00072       incremental_connections(false),
00073       connection_gradient_threshold(0.5),
00074       connection_removing_threshold(0.0),
00075       residual_correlation_gradient(true)
00076 {
00077 }
00078 
00079 PLEARN_IMPLEMENT_OBJECT(IncrementalNNet, 
00080                         "Incremental one-hidden-layer neural network with L1 regularization of output weights",
00081                         "Stops either when the number of hidden units (==stage) reaches the user-specified\n"
00082                         "maximum (nstages) or when it does not appear possible to add a hidden unit without\n"
00083                         "increasing the penalized cost.");
00084 
00085 void IncrementalNNet::declareOptions(OptionList& ol)
00086 {
00087 
00088     declareOption(ol, "n_outputs", &IncrementalNNet::n_outputs, OptionBase::buildoption,
00089                   "Number of output units. Must be coherent with output_cost_type and targetsize:\n"
00090                   "n_outputs==targetsize for 'squared_error', and targetsize==1 && n_outputs==n_classes for\n"
00091                   "hinge_loss and discrete_log_likelihood.\n");
00092 
00093     declareOption(ol, "output_weight_decay", &IncrementalNNet::output_weight_decay, OptionBase::buildoption,
00094                   "L1 regularizer's penalty factor on output weights.");
00095 
00096     declareOption(ol, "online", &IncrementalNNet::online, OptionBase::buildoption,
00097                   "use the online version if true, or the batch version if false. In both cases, only consider\n"
00098                   "adding a hidden unit after minibatch_size examples, and add it only if it would reduce the\n"
00099                   "average cost (including the L1 penalty). This current_average_cost is calculated either with\n"
00100                   "a moving average over a moving target (online version), or in two phases (batch version):\n"
00101                   "on even batches one improves the tentative hidden unit, while on odd batches one evaluates its quality.\n");
00102 
00103     declareOption(ol, "minibatch_size", &IncrementalNNet::minibatch_size, OptionBase::buildoption,
00104                   "0 is a special value meaning minibatch_size == training set size.\n"
00105                   "After a hidden unit is added, wait at least that number of examples before considering\n"
00106                   "to add a new one.\n");
00107 
00108     declareOption(ol, "output_cost_type", &IncrementalNNet::output_cost_type, OptionBase::buildoption,
00109                   "'squared_error', 'hinge_loss', 'discrete_log_likelihood' (for probabilistic classification).\n");
00110 
00111     declareOption(ol, "boosting", &IncrementalNNet::boosting, OptionBase::buildoption,
00112                   "use a boosting-like approach (only works with online=false) and train the new hidden unit \n"
00113                   "but not the previous ones; also descend not the actual cost but a weighted cost obtained\n"
00114                   "from the gradient of the output cost with respect to the hidden unit function (see the minimize_local_cost option).\n");
00115 
00116     declareOption(ol, "minimize_local_cost", &IncrementalNNet::minimize_local_cost, OptionBase::buildoption,
00117                   "if true then instead of minimizing the global cost sum_t Q(f(x_t),y_t),\n"
00118                   "each hidden unit minimizes sum_t Q'(f(x_t),y_t) h(x_t)\n"
00119                   "or some approximation of it if h is a hard threshold (weighted logistic regression cost\n"
00120                   "with targets sign(Q'(f(x_t),y_t)) and weights |Q'(f(x_t),y_t)|),\n"
00121                   "where Q is the output cost, f(x_t) is the current prediction, y_t the target, h(x_t) the\n"
00122                   "output of the new hidden unit.\n");
00123 
00124     declareOption(ol, "hard_activation_function", &IncrementalNNet::hard_activation_function, OptionBase::buildoption,
00125                   "if true then h(x) = sign(w'x + b), else h(x) = tanh(w'x + b).\n");
00126 
00127     declareOption(ol, "use_hinge_loss_for_hard_activation", &IncrementalNNet::use_hinge_loss_for_hard_activation, OptionBase::buildoption,
00128                   "Use hinge loss or cross-entropy to train hidden units when hard_activation_function is true.\n");
00129 
00130     declareOption(ol, "initial_learning_rate", &IncrementalNNet::initial_learning_rate, OptionBase::buildoption,
00131                   "learning_rate = initial_learning_rate / (1 + n_examples_seen * decay_factor).\n");
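    // For example, with the default initial_learning_rate = 0.01 and decay_factor = 1e-6,
    // the learning rate decays to 0.005 after 1e6 examples and to 0.001 after 9e6 examples
    // (illustrative arithmetic only).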
00132 
00133     declareOption(ol, "decay_factor", &IncrementalNNet::decay_factor, OptionBase::buildoption,
00134                   "decay factor in learning_rate formula.\n");
00135 
00136     declareOption(ol, "max_n_epochs_to_fail", &IncrementalNNet::max_n_epochs_to_fail, OptionBase::buildoption,
00137                   "Maximum number of epochs (not necessarily an integer) to try improving the new hidden unit\n"
00138                   "before declaring failure to improve the regularized cost (and thus stopping training).\n");
00139                 
00140     declareOption(ol, "rand_range", &IncrementalNNet::rand_range, OptionBase::buildoption,
00141                   "Interval of random numbers when initializing weights/biases: (-rand_range/2, rand_range/2).\n");
00142 
00143     declareOption(ol, "enable_internal_weights", &IncrementalNNet::enable_internal_weights, OptionBase::buildoption,
00144                   "Network has a cascade topology (each hidden unit has connections to all previous ones) if true,\n" 
00145                   "or a one hidden layer topology if false (default).\n");
00146   
00147     declareOption(ol, "incremental_connections", &IncrementalNNet::incremental_connections, OptionBase::buildoption,
00148                   "Add hidden connections incrementally if true, or all at once with a new unit if false (default).\n"
00149                   "This option is only supported with n_outputs == 1." );
00150 
00151     declareOption(ol, "connection_gradient_threshold", &IncrementalNNet::connection_gradient_threshold, OptionBase::buildoption,
00152                   "Threshold of gradient for connection to be added, when incremental_connections == true." );
00153 
00154     declareOption(ol, "connection_removing_threshold", &IncrementalNNet::connection_removing_threshold, OptionBase::buildoption,
00155                   "Connections are removed for which |weight|+|MAgradient| < connection_removing_threshold.\n"
00156                   "Default value is 0 (connections are not removed). Enabled by incremental_connections." );
00157 
00158     declareOption(ol, "residual_correlation_gradient", &IncrementalNNet::residual_correlation_gradient, OptionBase::buildoption,
00159                   "Use residual correlation gradient (ConvexNN) if true (default), or classical error back-propagation if false." );
00160 
00161 
00162     //declareOption(ol, "", &IncrementalNNet::, OptionBase::buildoption,
00163     
00164     declareOption(ol, "direct_weights", &IncrementalNNet::direct_weights, OptionBase::learntoption,
00165                   "matrix of direct [output, input] weights.\n");
00166 
00167     declareOption(ol, "direct_weight_gradients", &IncrementalNNet::direct_weight_gradients, OptionBase::learntoption,
00168                   "Moving average gradients on matrix of direct [output, input] weights.\n");
00169   
00170     declareOption(ol, "output_weights", &IncrementalNNet::output_weights, OptionBase::learntoption,
00171                   "matrix of [hidden_unit, output] output weights.\n"
00172                   "** NOTE IT IS TRANSPOSED ** with respect to\n"
00173                   "the 'natural' index order, so as to easily add hidden units.\n");
00174     
00175     declareOption(ol, "output_weight_gradients", &IncrementalNNet::output_weight_gradients, OptionBase::learntoption,
00176                   "Moving average gradients on matrix of [hidden_unit, output] output weights\n"
00177                   "(enabled by residual_correlation_gradient && outputsize() > 1).\n"
00178                   "** NOTE IT IS TRANSPOSED ** with respect to\n"
00179                   "the 'natural' index order, so as to easily add hidden units.\n");
00180 
00181     declareOption(ol, "output_biases", &IncrementalNNet::output_biases, OptionBase::learntoption,
00182                   "vector of output biases\n");
00183 
00184     declareOption(ol, "hidden_layer_weights", &IncrementalNNet::hidden_layer_weights, OptionBase::learntoption,
00185                   "matrix of weights from input to hidden units: [hidden_unit, input].\n");
00186   
00187     declareOption(ol, "hidden_layer_weight_gradients", &IncrementalNNet::hidden_layer_weight_gradients, OptionBase::learntoption,
00188                   "Moving average gradients on hidden_layer_weights (enabled by incremental_connections).\n");
00189 
00190     declareOption(ol, "internal_weights", &IncrementalNNet::internal_weights, OptionBase::learntoption,
00191                   "weights among hidden units [to, from] in cascade architecture (enabled by enable_internal_weights).\n");
00192 
00193     declareOption(ol, "internal_weight_gradients", &IncrementalNNet::internal_weight_gradients, OptionBase::learntoption,
00194                   "Moving average gradients on internal_weights (enabled by incremental_connections).\n");
00195 
00196     declareOption(ol, "hidden_layer_biases", &IncrementalNNet::hidden_layer_biases, OptionBase::learntoption,
00197                   "vector of biases of the hidden units.\n");
00198 
00199     declareOption(ol, "candidate_unit_weights", &IncrementalNNet::candidate_unit_weights, OptionBase::learntoption,
00200                   "vector of weights from input to next candidate hidden unit.\n");
00201 
00202     declareOption(ol, "candidate_unit_weight_gradients", &IncrementalNNet::candidate_unit_weight_gradients, OptionBase::learntoption,
00203                   "Moving average gradients on candidate_unit_weights (enabled by incremental_connections).\n");
00204 
00205     declareOption(ol, "candidate_unit_bias", &IncrementalNNet::candidate_unit_bias, OptionBase::learntoption,
00206                   "bias parameter of next candidate hidden unit.\n");
00207 
00208     declareOption(ol, "candidate_unit_output_weights", &IncrementalNNet::candidate_unit_output_weights, OptionBase::learntoption,
00209                   "vector of weights from next candidate hidden unit to outputs.\n");
00210   
00211     declareOption(ol, "candidate_unit_output_weight_gradients", &IncrementalNNet::candidate_unit_output_weight_gradients,
00212                   OptionBase::learntoption,
00213                   "Moving average gradients on vector of weights from next candidate hidden unit to outputs.\n"
00214                   "(enabled by residual_correlation_gradient && outputsize() > 1).\n");
00215       
00216     declareOption(ol, "candidate_unit_internal_weights", &IncrementalNNet::candidate_unit_internal_weights, OptionBase::learntoption,
00217                   "vector of weights from previous hidden units to the candidate unit (enabled by enable_internal_weights).\n");
00218 
00219     declareOption(ol, "candidate_unit_internal_weight_gradients", &IncrementalNNet::candidate_unit_internal_weight_gradients,
00220                   OptionBase::learntoption,
00221                   "Moving average gradients on candidate_unit_internal_weights (enabled by incremental_connections).\n");
00222 
00223     declareOption(ol, "n_examples_seen", &IncrementalNNet::n_examples_seen, OptionBase::learntoption,
00224                   "number of training examples seen (= number of updates done) since the beginning of training.\n");
00225 
00226     declareOption(ol, "current_average_cost", &IncrementalNNet::current_average_cost, OptionBase::learntoption,
00227                   "current average cost, including fitting and regularization terms. It is computed\n"
00228                   "differently according to the online and minibatch_size options.\n");
00229 
00230     declareOption(ol, "next_average_cost", &IncrementalNNet::next_average_cost, OptionBase::learntoption,
00231                   "average cost if candidate hidden unit was included. It is computed like current_average_cost.\n");
00232 
00233     declareOption(ol, "n_examples_training_candidate", &IncrementalNNet::n_examples_training_candidate, OptionBase::learntoption,
00234                   "number of examples seen since started to train current candidate hidden unit. Used in\n"
00235                   "stopping criterion: stop when n_examples_training_candidate >= max_n_epochs_to_fail * train_set->length().\n");
00236 
00237     // Now call the parent class' declareOptions
00238     inherited::declareOptions(ol);
00239 }
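
// A hedged sketch of how these options might be set from a PLearn script (the option
// names are those declared above, but the script syntax and the values are illustrative
// assumptions, not taken from this file):
//
//   IncrementalNNet(
//       n_outputs = 1;
//       output_cost_type = "squared_error";
//       online = 1;
//       minibatch_size = 0;            # 0 means minibatch_size == training set size
//       initial_learning_rate = 0.01;
//       decay_factor = 1e-6;
//       output_weight_decay = 1e-5;
//       max_n_epochs_to_fail = 1;
//       nstages = 50;                  # maximum number of hidden units
//   )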
00240 
00241 void IncrementalNNet::build_()
00242 {
00243     if (output_cost_type=="squared_error")
00244         cost_type=0;
00245     else if (output_cost_type=="hinge_loss")
00246         cost_type=1;
00247     else if (output_cost_type=="discrete_log_likelihood")
00248         cost_type=2;
00249     else PLERROR("IncrementalNNet:build: output_cost_type should either be 'squared_error', 'hinge_loss', or 'discrete_log_likelihood'");
00250   
00251     if(!train_set) return;
00252       
00253     direct_weights.resize(n_outputs,inputsize_);
00254     output_weights.resize(stage,n_outputs);
00255     output_biases.resize(n_outputs);
00256     hidden_layer_weights.resize(stage,inputsize_);
00257     hidden_layer_biases.resize(stage);
00258  
00259     linear_output.resize(n_outputs);
00260     act.resize(stage);
00261     h.resize(stage);
00262   
00263     candidate_unit_output_weights.resize(n_outputs);
00264     candidate_unit_weights.resize(inputsize_);
00265   
00266     if ( enable_internal_weights ) {
00267         internal_weights.resize(stage); //.clear();
00268         candidate_unit_internal_weights.resize(stage);
00269     }
00270     if ( incremental_connections ) {
00271         direct_weight_gradients.resize(n_outputs,inputsize_);
00272         hidden_layer_weight_gradients.resize(stage,inputsize_);
00273         candidate_unit_weight_gradients.resize(inputsize_);
00274         if ( enable_internal_weights ) {
00275             internal_weight_gradients.resize(stage);
00276             candidate_unit_internal_weight_gradients.resize(stage);
00277         }
00278     } 
00279     if ( residual_correlation_gradient && n_outputs > 1 ) {
00280         output_weight_gradients.resize(stage,n_outputs);
00281         candidate_unit_output_weight_gradients.resize(n_outputs);
00282     }
00283 
00284 }
00285 
00286 // ### Nothing to add here, simply calls build_
00287 void IncrementalNNet::build()
00288 {
00289     inherited::build();
00290     build_();
00291 }
00292 
00293 
00294 void IncrementalNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00295 {
00296     inherited::makeDeepCopyFromShallowCopy(copies);
00297 
00298     deepCopyField(direct_weights, copies);
00299     deepCopyField(direct_weight_gradients, copies);
00300     deepCopyField(output_weights, copies);
00301     deepCopyField(output_weight_gradients, copies);
00302     deepCopyField(output_biases, copies);
00303     deepCopyField(hidden_layer_weights, copies);
00304     deepCopyField(hidden_layer_weight_gradients, copies);
00305     deepCopyField(hidden_layer_biases, copies);
00306     deepCopyField(internal_weights, copies);
00307     deepCopyField(internal_weight_gradients, copies);
00308     deepCopyField(candidate_unit_weights, copies);  
00309     deepCopyField(candidate_unit_weight_gradients, copies);
00310     deepCopyField(candidate_unit_output_weights, copies);
00311     deepCopyField(candidate_unit_output_weight_gradients, copies);
00312     deepCopyField(candidate_unit_internal_weights, copies);
00313     deepCopyField(candidate_unit_internal_weight_gradients, copies);
00314     deepCopyField(act, copies);
00315     deepCopyField(h, copies);
00316     deepCopyField(linear_output, copies);
00317 }
00318 
00319 
00320 int IncrementalNNet::outputsize() const
00321 {
00322     return n_outputs;
00323 }
00324 
00325 void IncrementalNNet::forget()
00326 {
00327     // reset the number of hidden units to 0 = stage
00328     stage=0;
00329     n_examples_seen=0;
00330     current_average_cost=0;
00331     next_average_cost=0;
00332     current_example=0;
00333     // resize all the matrices, vectors with stage=0
00334     build_();
00335   
00336     candidate_unit_output_weights.fill(0.1);
00337     candidate_unit_bias = ((real)rand()/RAND_MAX - 0.5)*rand_range;
00338     if (!incremental_connections) {
00339         for( int i=0; i < inputsize_; i++ )
00340             candidate_unit_weights[i] = ((real)rand()/RAND_MAX - 0.5)*rand_range; 
00341     } else {
00342         direct_weights.fill(0.0);
00343         direct_weight_gradients.fill(0.0);
00344         candidate_unit_weights.fill(0.0);
00345         candidate_unit_weight_gradients.fill(0.0);
00346     }
00347     if ( residual_correlation_gradient && n_outputs > 1 ){
00348         candidate_unit_output_weight_gradients.fill(0.0);
00349     }
00350 }
00351     
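// Overview of the training procedure implemented below: for each example, the
// existing hidden units and the output layer are updated by stochastic gradient
// descent on the L1-regularized cost, while a candidate hidden unit is trained in
// parallel on the same stream of examples. Every minibatchsize examples, the
// moving-average cost with and without the candidate is compared; the candidate is
// inserted (stage++) if it would lower the average cost, and training stops once a
// candidate has failed to improve the cost for max_n_epochs_to_fail epochs.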
00352 void IncrementalNNet::train()
00353 {
00354     // The role of the train method is to bring the learner up to stage==nstages,
00355     // updating train_stats with training costs measured on-line in the process.
00356 
00357     if (!train_set)
00358         PLERROR("IncrementalNNet::train train_set must be set before calling train\n");
00359     if (output_cost_type == "squared_error" &&
00360         train_set->targetsize() != n_outputs)
00361         PLERROR("IncrementalNNet::train with 'squared_error' output_cost_type, train_set->targetsize(%d) should equal n_outputs(%d)",
00362                 train_set->targetsize(),n_outputs);
00363     if ((output_cost_type == "hinge_loss" || output_cost_type == "discrete_log_likelihood") &&
00364         train_set->targetsize()!=1)
00365         PLERROR("IncrementalNNet::train 'hinge_loss' or 'discrete_log_likelihood' output_cost_type is for classification, train_set->targetsize(%d) should be 1",
00366                 train_set->targetsize());
00367 //  if ( incremental_connections && n_outputs != 1 )
00368 //    PLERROR("IncrementalNNet::train incremental_connections is only supported with n_outputs == 1\n");
00369 
00370     int minibatchsize = minibatch_size;
00371     if (minibatch_size == 0)
00372         minibatchsize = train_set->length();
00373 
00374     real current_average_class_error=0;
00375     real next_average_class_error=0;
00376     real old_current_average_cost;
00377     real old_next_average_cost;
00378 
00379 
00380     static Vec input;  // static so we don't reallocate/deallocate memory each time...
00381     static Vec output;
00382     static Vec target; // (but be careful that static means shared!)
00383     static Vec train_costs;
00384     static Vec costs_with_candidate;
00385     static Vec output_gradient;
00386     static Vec hidden_gradient;
00387     static Vec output_with_candidate;
00388     static Vec output_gradient_with_candidate;
00389     static Vec output_with_signchange;
00390     static Mat candidate_unit_output_weights_mat;
00391     static Vec candidate_h_vec;
00392     static Vec candidate_hidden_gradient;
00393     static Vec linear_output_with_candidate;
00394     int nc=nTrainCosts();
00395     train_costs.resize(nc);
00396     costs_with_candidate.resize(nc);
00397     input.resize(inputsize());    // the train_set's inputsize()
00398     output.resize(n_outputs);    
00399     output_gradient.resize(n_outputs);    
00400     hidden_gradient.resize(stage);    
00401     output_with_candidate.resize(n_outputs);
00402     output_gradient_with_candidate.resize(n_outputs);
00403     output_with_signchange.resize(n_outputs);    
00404     target.resize(targetsize());  // the train_set's targetsize()
00405     candidate_unit_output_weights_mat = candidate_unit_output_weights.toMat(n_outputs,1);
00406     candidate_h_vec.resize(1);
00407     candidate_hidden_gradient.resize(1);
00408     linear_output_with_candidate.resize(n_outputs);
00409     real sampleweight; // the train_set's weight on the current example
00410 
00411     if(!train_stats)  // make a default stats collector, in case there's none
00412         train_stats = new VecStatsCollector();
00413 
00414     if(nstages<stage) // asking to revert to a previous stage!
00415         forget();  // reset the learner to stage=0
00416 
00417     bool stopping_criterion_not_met = true;
00418 
00419     moving_average_coefficient = 1.0/minibatchsize;
00420     learning_rate = initial_learning_rate;
00421 
00422     while(stage<nstages && stopping_criterion_not_met)
00423     {
00424         // clear statistics of previous epoch
00425         train_stats->forget() ;
00426   
00427         // iterate through the data for some time...
00428         do 
00429         {
00430             // compute output and cost
00431             train_set->getExample(current_example, input, target, sampleweight);
00432             current_example++; 
00433             if (current_example==train_set->length()) current_example=0;
00434             computeOutput(input,output);
00435             computeCostsFromOutputs(input,output,target,train_costs);
00436             real current_total_cost = train_costs[0];
00437             real current_fit_error = train_costs[1];
00438             real current_class_error = (cost_type!=0)?train_costs[3]:0;
00439             train_costs*=sampleweight;
00440             train_stats->update(train_costs);
00441             // compute output and cost IF WE USED THE CANDIDATE HIDDEN UNIT
00442             real candidate_act = 
00443                 dot(input, candidate_unit_weights) + candidate_unit_bias;
00444             if ( enable_internal_weights && stage > 0 ) 
00445                 candidate_act += dot( h, candidate_unit_internal_weights );
00446             real candidate_h;
00447             if (hard_activation_function)
00448                 candidate_h = sign(candidate_act);
00449             else
00450                 candidate_h = tanh(candidate_act);
00451             candidate_h_vec[0]=candidate_h;
00452             // linear_output_with_candidate = linear_output + candidate_unit_output_weight*candidate_h;
00453             multiplyAdd(linear_output,candidate_unit_output_weights,
00454                         candidate_h,linear_output_with_candidate);
00455             if (cost_type == 2) // "discrete_log_likelihood"
00456                 softmax(linear_output_with_candidate,output_with_candidate);
00457             else
00458                 output_with_candidate << linear_output_with_candidate;
00459             computeCostsFromOutputs(input,output_with_candidate,target,costs_with_candidate); 
00460             // computeCostsFromOutputs does not count the cost of the candidate's output weights, so add it:
00461             costs_with_candidate[0] += output_weight_decay * sumabs(candidate_unit_output_weights);
00462             real candidate_class_error = (cost_type!=0)?costs_with_candidate[3]:0;
00463 
00464             if ( decay_factor != 0.0 ) 
00465                 learning_rate = initial_learning_rate / ( 1 + n_examples_seen * decay_factor );
00466       
00467             // TRAINING OF THE NETWORK
00468             // backprop & update regular network parameters
00469             if (!boosting) // i.e. continue training the existing hidden units
00470             {
00471                 // ** compute gradient on linear output
00472                 output_loss_gradient(output, target, output_gradient, sampleweight);
00473 
00474                 // ** bprop through the network & update
00475 
00476                 // bprop on output layer
00477                 multiplyAcc(output_biases, output_gradient, -learning_rate);
00478         
00479                 if (!incremental_connections){
00480                     for ( int i = 0; i < n_outputs; i++ )
00481                         multiplyAcc( direct_weights(i), input, output_gradient[i]*(-learning_rate) );
00482                 } else {
00483                     for ( int i = 0; i < n_outputs; i++ )
00484                         update_incremental_connections( direct_weights(i), direct_weight_gradients(i), input, output_gradient[i] );
00485                 }
00486          
00487                 if (stage>0)
00488                 {
00489                     // the method below does:
00490                     // hidden_gradient[j] = sum_i output_weights[j,i]*output_gradient[i]
00491                     // output_weights[i,j] -= learning_rate * (output_gradient[i] * h[j] + output_weight_decay * sign(output_weights[i,j]))
00492                     transposedLayerL1BpropUpdate(hidden_gradient, output_weights, h, output_gradient, learning_rate, output_weight_decay);
00493           
00494                     if ( residual_correlation_gradient ) {
00495                         if ( n_outputs > 1 ){
00496                             for ( int i = 0; i < stage; i++ ) { // calculate output_weight_gradients
00497                                 residual_correlation_output_gradient( output_weight_gradients(i), output_weights(i), output_gradient, h[i], 
00498                                                                       hidden_gradient[i] );
00499                             }             
00500                         } else hidden_gradient.fill(output_gradient[0]);           
00501                     }
00502           
00503                     if ( !enable_internal_weights ){  // simple one-hidden-layer topology 
00504                         // bprop through hidden units activation
00505                         if (hard_activation_function) 
00506                             // Should h_i(x) change of sign?
00507                             // Consider the loss that would occur if it did, i.e. with output replaced by output - 2*W[.,i]*h_i(x)
00508                             // Then consider a weighted classification problem
00509                             // with the appropriate sign and weight = gradient on h_i(x).
00510                         {
00511                             for (int i=0;i<int(stage);i++) // loop over hidden units
00512                             {
00513                                 Vec Wi = output_weights(i);
00514                                 multiplyAdd(output,Wi,-2*h[i],output_with_signchange);
00515                                 real fit_error_with_sign_change = output_loss(output_with_signchange,target);
00516                                 int target_i = int(sign(fit_error_with_sign_change-current_fit_error)*h[i]);
00517                                 real weight_i = fabs(hidden_gradient[i]); // CHECK: when is the sign of hidden_gradient different from (h[i]-target_i)?
00518                                 if (use_hinge_loss_for_hard_activation)
00519                                     hidden_gradient[i] = weight_i * d_hinge_loss(act[i],target_i);
00520                                 else // use cross-entropy
00521                                     hidden_gradient[i] = weight_i * (sigmoid(act[i]) - 0.5*(target_i+1));
00522                             }
00523                         }
00524                         else
00525                             bprop_tanh(h,hidden_gradient,hidden_gradient);  //  hidden_gradient *= ( 1 - h^2 )
00526                     } else { // cascade topology
00527                         if ( !incremental_connections ){
00528                             //if (hard_activation_function) { /*not implemented*/ } else
00529                             for ( int i = stage-1; i >= 0; i-- ) { // bprop_tanh equivalent, also modifies internal_weights
00530                                 hidden_gradient[i] *= (1 - h[i]*h[i]);
00531                                 for ( int j = 0; j < i; j++ ) {
00532                                     if ( !residual_correlation_gradient ) // back-propagate gradients through internal weights
00533                                         hidden_gradient[j] += internal_weights[i][j] * hidden_gradient[i]; 
00534                                     internal_weights[i][j] -= learning_rate * ( hidden_gradient[i] * h[j] );
00535                                     //+ output_weight_decay * sign(internal_weights[i][j]) );
00536                                 }
00537                             }            
00538                         } else { // incremental internal connections 
00539                             for ( int i = stage-1; i >= 0; i-- ) { 
00540                                 hidden_gradient[i] *= (1 - h[i]*h[i]);
00541                                 if ( !residual_correlation_gradient ) {
00542                                     for ( int j = 0; j < i; j++ ) // back-propagate gradients through internal connections.
00543                                         hidden_gradient[j] += internal_weights[i][j] * hidden_gradient[i];
00544                                 }
00545                                 update_incremental_connections( internal_weights[i], internal_weight_gradients[i], h, hidden_gradient[i] );
00546                             } 
00547                             //hidden_gradient[0] *= (1 - h[0]*h[0]);  // the first unit has no incoming internal connections
00548                         }           
00549                     }
00550           
00551                     //hidden_gradient *= -learning_rate;
00552                     hidden_layer_biases -= hidden_gradient * learning_rate;
00553                     if ( !incremental_connections ) {
00554                         // bprop through hidden layer and update hidden_weights
00555                         externalProductAcc(hidden_layer_weights, hidden_gradient * (-learning_rate), input);
00556                     } else { // incremental_connections
00557                         for ( int i = 0; i < stage; i++ ){
00558                             update_incremental_connections( hidden_layer_weights(i), hidden_layer_weight_gradients(i), input, 
00559                                                             hidden_gradient[i] );
00560                         }
00561                     }
00562                 }
00563             }
00564 
00565             //MNT
00566             if ( verbosity > 3 ) {
00567                 cout << "STAGE: " << stage << endl
00568                      << "input: " << input << endl 
00569                      << "output: " << output << endl  
00570                      << "target: " << target << endl                          
00571                      << "train_costs: " << train_costs << endl
00572                      << "output_gradient: " << output_gradient << endl
00573                      << "candidate_h: " << candidate_h << endl
00574                      << "current_average_cost: " << current_average_cost << endl
00575                     ;   
00576                 if ( stage > 0 ) {
00577                     cout << "hidden_layer_weights: " << hidden_layer_weights //<< endl             
00578                          << "hidden_layer_biases: " << hidden_layer_biases << endl
00579                         ;  
00580                 }
00581                 if ( verbosity > 4 ) {     
00582                     cout << "  output_with_candidate: " << output_with_candidate << endl;
00583                     cout << "  target: " << target << endl;
00584                     cout << "  candidate_unit_output_weights_mat(before): " << candidate_unit_output_weights_mat;
00585                     cout << "  candidate_unit_weights (before): " << candidate_unit_weights << endl;
00586                     cout << "  candidate_unit_bias (before): " << candidate_unit_bias << endl;
00587                 }
00588             }
00589     
00590             // TRAINING OF THE CANDIDATE UNIT
00591             // backprop & update candidate hidden unit
00592             output_loss_gradient(output_with_candidate, target, output_gradient_with_candidate, sampleweight);     
00593             // computes candidate_hidden_gradient, and updates candidate_unit_output_weights_mat
00594             layerBpropUpdate(candidate_hidden_gradient, candidate_unit_output_weights_mat, 
00595                              candidate_h_vec, output_gradient_with_candidate, learning_rate);
00596       
00597             if ( residual_correlation_gradient ) {
00598                 residual_correlation_output_gradient( candidate_unit_output_weight_gradients, candidate_unit_output_weights, 
00599                                                       output_gradient_with_candidate, candidate_h, candidate_hidden_gradient[0] );
00600             }
00601       
00602             // bprop through candidate hidden unit activation, heuristic method
00603             if (hard_activation_function)
00604             {
00605                 multiplyAdd(output_with_candidate,candidate_unit_output_weights,-2*candidate_h,output_with_signchange);
00606                 real fit_error_with_sign_change = output_loss(output_with_signchange,target);
00607                 int hidden_class = int(sign(fit_error_with_sign_change-current_fit_error)*candidate_h);
00608                 real weight_on_loss = fabs(candidate_hidden_gradient[0]); // CHECK: when is the sign of hidden_gradient different from (h[i]-target_i)?
00609                 if (use_hinge_loss_for_hard_activation)
00610                     candidate_hidden_gradient[0] = weight_on_loss * d_hinge_loss(candidate_act,hidden_class);
00611                 else // use cross-entropy
00612                     candidate_hidden_gradient[0] = weight_on_loss * (sigmoid(candidate_act) - 0.5*(hidden_class+1));
00613             } else {
00614                 bprop_tanh(candidate_h_vec,candidate_hidden_gradient,candidate_hidden_gradient);        
00615             }
00616 
00617             //candidate_hidden_gradient *= -learning_rate;
00618             candidate_unit_bias -= candidate_hidden_gradient[0] * learning_rate;
00619             
00620             if ( incremental_connections ) {
00621                 update_incremental_connections( candidate_unit_weights, candidate_unit_weight_gradients, input,
00622                                                 candidate_hidden_gradient[0]);
00623         
00624                 if ( enable_internal_weights && stage > 0 ) { // consider weights from older hidden units
00625                     update_incremental_connections( candidate_unit_internal_weights, candidate_unit_internal_weight_gradients, h,
00626                                                     candidate_hidden_gradient[0]);
00627                 }                
00628             } else {  // train all connections at once    
00629                 multiplyAcc( candidate_unit_weights, input, candidate_hidden_gradient[0] * (-learning_rate) );
00630                 if ( enable_internal_weights && stage > 0 ) // consider weights from older hidden units
00631                     multiplyAcc( candidate_unit_internal_weights, h, candidate_hidden_gradient[0] * (-learning_rate) );
00632             }
00633 
00634             //MNT
00635             if ( verbosity > 4 ) {
00636                 cout << "  candidate_hidden_gradient: " << candidate_hidden_gradient << endl;
00637                 cout << "  candidate_unit_output_weights_mat(after): " << candidate_unit_output_weights_mat;
00638                 cout << "  candidate_unit_weights (after): " << candidate_unit_weights << endl;
00639                 cout << "  candidate_unit_bias (after): " << candidate_unit_bias << endl;
00640             }
00641            
00642             // keep track of average performance with and without candidate hidden unit
00643             n_examples_seen++;
00644             int n_batches_seen = n_examples_seen / minibatchsize;
00645             int t_since_beginning_of_batch = n_examples_seen - n_batches_seen*minibatchsize;
00646             if (!online)
00647                 moving_average_coefficient = 1.0/(1+t_since_beginning_of_batch);
00648       
00649             next_average_cost = moving_average_coefficient*costs_with_candidate[0]
00650                 +(1-moving_average_coefficient)*next_average_cost;
00651             if (n_examples_seen==1) {
00652                 current_average_cost = current_total_cost;
00653                 old_current_average_cost = current_average_cost;
00654                 old_next_average_cost = next_average_cost;
00655             } else { 
00656                 current_average_cost = moving_average_coefficient*current_total_cost
00657                     +(1-moving_average_coefficient)*current_average_cost;
00658             }
00659       
00660             if (verbosity>1 && cost_type!=0)
00661             {
00662                 current_average_class_error = moving_average_coefficient*current_class_error
00663                     +(1-moving_average_coefficient)*current_average_class_error;
00664                 next_average_class_error = moving_average_coefficient*candidate_class_error
00665                     +(1-moving_average_coefficient)*next_average_class_error;
00666             }
00667            
00668             // consider inserting the candidate hidden unit (at every minibatchsize examples)
00669             if (t_since_beginning_of_batch == 0) 
00670             {        
00671         
00672                 old_current_average_cost = current_average_cost;
00673                 old_next_average_cost = next_average_cost;
00674 
00675                 n_examples_training_candidate += minibatchsize;
00676                 if (verbosity>1) 
00677                 {
00678                     cout << "At t=" << real(n_examples_seen)/train_set->length() 
00679                          << " epochs, estimated average cost = " << current_average_cost 
00680                          << " (with candidate " << next_average_cost << " )"<< endl;
00681                     if (verbosity>2)
00682                         cout << "(current cost = " << current_total_cost << "; and with candidate = " 
00683                              << costs_with_candidate[0] << ")" << endl;
00684                     if (cost_type!=0)
00685                         cout << "Estimated classification error = " << current_average_class_error 
00686                              << " (with candidate " << next_average_class_error << " )"<< endl;
00687                     cout << "learning rate = " << learning_rate << endl;
00688                 }
00689         
00690                 if ( next_average_cost < current_average_cost && stage < nstages ) 
00691                 {
00692                     // insert candidate hidden unit
00693                     stage++;
00694                     output_weights.resize(stage,n_outputs);
00695                     hidden_layer_weights.resize(stage,inputsize());
00696                     hidden_layer_biases.resize(stage);
00697                     hidden_gradient.resize(stage);
00698                     output_weights(stage-1) << candidate_unit_output_weights;
00699                     hidden_layer_weights(stage-1) << candidate_unit_weights;
00700                     hidden_layer_biases[stage-1] = candidate_unit_bias;
00701                     if ( incremental_connections ){
00702                         hidden_layer_weight_gradients.resize(stage,inputsize());  
00703                         hidden_layer_weight_gradients(stage-1) << candidate_unit_weight_gradients;
00704                     }
00705                     if ( residual_correlation_gradient && n_outputs > 1 ) {
00706                         output_weight_gradients.resize(stage,n_outputs);
00707                         output_weight_gradients(stage-1) << candidate_unit_output_weight_gradients;
00708                         candidate_unit_output_weight_gradients.fill(0.0);
00709                     }
00710                     if ( enable_internal_weights ) {
00711                         internal_weights.resize(stage);
00712                         internal_weights[stage-1].resize(stage-1);
00713                         internal_weights[stage-1] << candidate_unit_internal_weights;
00714                         //if ( stage > 1 ) 
00715                         //cout << "internal_weights.size(): " << internal_weights.size() << endl;
00716                         candidate_unit_internal_weights.resize(stage);
00717                         if  ( incremental_connections ){
00718                             internal_weight_gradients.resize(stage);
00719                             internal_weight_gradients[stage-1].resize(stage-1);
00720                             internal_weight_gradients[stage-1] << candidate_unit_internal_weight_gradients;
00721                             candidate_unit_internal_weight_gradients.resize(stage);
00722                             candidate_unit_internal_weights.fill(.0);
00723                             //candidate_unit_internal_weights.fill(0.01/stage);
00724                             candidate_unit_internal_weight_gradients.fill(.0);
00725                         } else {
00726                             candidate_unit_internal_weights.fill(.0);
00727                             //candidate_unit_internal_weights.fill(0.01/stage);
00728                         }
00729                     }
00730                     act.resize(stage);
00731                     h.resize(stage);
00732                     // initialize a new candidate
00733                     candidate_unit_output_weights.fill(0.01/stage);
00734                     //candidate_unit_weights.clear();
00735                     //MNT
00736                     if (!incremental_connections) {
00737                         for( int i=0; i < candidate_unit_weights.length(); i++ )
00738                             candidate_unit_weights[i] = ((real)rand()/RAND_MAX - 0.5)*rand_range; 
00739                     } else candidate_unit_weights.fill(.0);
00740                     candidate_unit_bias = ((real)rand()/RAND_MAX - 0.5)*rand_range;
00741           
00742                     if (verbosity>1)
00743                         cout << "Adding hidden unit number " << stage << " after training it for "
00744                              << n_examples_training_candidate << " examples.\n The average cost is "
00745                              << "expected to decrease from " << current_average_cost << " to " 
00746                              << next_average_cost << "." << endl;
00747                     n_examples_training_candidate=0;
00748                 } else {// should we stop?
00749                     if (n_examples_training_candidate >= max_n_epochs_to_fail*train_set->length())
00750                     {
00751                         stopping_criterion_not_met = false; // STOP
00752                         if (verbosity>0)
00753                             cout << "Stopping at " << stage << " units, after seeing " << n_examples_seen 
00754                                  << " examples in " << n_examples_seen/train_set->length() << " epochs." << endl
00755                                  << "The next candidate unit yields an apparent average cost of " 
00756                                  << next_average_cost << " instead of the current one of " << current_average_cost << endl;
00757                     }
00758                 }
00759                 if (!online)
00760                     current_average_cost = 0;
00761             }
00762         }
00763         while (stage<nstages && stopping_criterion_not_met);
00764           
00765         //++stage;
00766         train_stats->finalize(); // finalize statistics for this epoch
00767     }
00768 }
00769 
00770 
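// Forward pass: act = hidden_layer_weights * input + hidden_layer_biases; with the
// cascade topology (enable_internal_weights) each h[i] additionally feeds the
// activations of the later units j > i through internal_weights[j][i]. The linear
// output is output_weights^T h + output_biases, and a softmax is applied only for
// the 'discrete_log_likelihood' cost type.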
00771 void IncrementalNNet::computeOutput(const Vec& input, Vec& output) const
00772 {
00773     // Compute the output from the input.
00774     int nout = outputsize();
00775     output.resize(nout);
00776     if (stage>0)
00777     {
00778         product( act, hidden_layer_weights, input );
00779         act += hidden_layer_biases;
00780 
00781         if ( enable_internal_weights ) { // cascade topology     
00782             for( int i = 0; i < stage; i++ ) {
00783                 h[i] = hard_activation_function ? sign( act[i] ) : tanh( act[i] );
00784                 for( int j = i+1; j < stage; j++ ) {
00785                     act[j] += h[i] * internal_weights[j][i];
00786                 }
00787             }
00788         } else {                         // simple one-hidden-layer topology
00789             if (hard_activation_function) 
00790                 compute_sign(act,h);
00791             else 
00792                 compute_tanh(act,h);
00793         } 
00794         transposeProduct(linear_output,output_weights,h);      
00795     } 
00796     else  linear_output.clear();
00797     linear_output+=output_biases;
00798     if (cost_type==2) // "discrete_log_likelihood"
00799         softmax(linear_output,output);
00800     else
00801         output << linear_output;
00802 }    
00803 
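// Per-example fit error for the three supported cost types (t = target class):
//   squared_error:           ||output - target||^2  (powdistance)
//   hinge_loss:              one-against-all hinge loss over the n_outputs class scores
//   discrete_log_likelihood: -log(output[t]), where output = softmax(linear_output)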
00804 real IncrementalNNet::output_loss(const Vec& output,const Vec& target) const
00805 {
00806     real fit_error=0;
00807     if (cost_type == 0) // "squared_error"
00808         fit_error = powdistance(output,target);
00809     else {
00810         int target_class = int(target[0]);
00811         if (cost_type == 1) // "hinge_loss", one against all binary classifiers
00812             fit_error = one_against_all_hinge_loss(output,target_class);
00813         else // (output_cost_type == "discrete_log_likelihood")
00814             fit_error = - safelog(output[target_class]); // - sum safelog(1-the_rest_of_the_output)?
00815     }
00816     return fit_error;
00817 }
00818 
00819 void IncrementalNNet::output_loss_gradient(const Vec& output,const Vec& target,
00820                                            Vec output_gradient, real sampleweight) const
00821 {
00822     if (cost_type==0) // "squared_error"
00823     {
00824         substract(output,target,output_gradient);
00825         output_gradient *= sampleweight * 2;
00826         return;
00827     }
00828     int target_class = int(target[0]);
00829     if (cost_type==1) // "hinge_loss"
00830     {
00831         one_against_all_hinge_loss_bprop(output,target_class,
00832                                          output_gradient);
00833         if (sampleweight!=1)
00834             output_gradient *= sampleweight;
00835     }
00836     else // (output_cost_type=="discrete_log_likelihood")
00837     {
00838         for (int i=0;i<n_outputs;i++)
00839         {
00840             real y_i = (target_class==i)?1:0;
00841             output_gradient[i] = sampleweight*(output[i] - y_i);
00842         }
00843     }  
00844 }
00845 
00846 void IncrementalNNet::computeCostsFromOutputs(const Vec& input, const Vec& output, 
00847                                               const Vec& target, Vec& costs) const
00848 {
00849     // Compute the costs from *already* computed output. 
00850     real fit_error = output_loss(output,target);
00851     real regularization_penalty = output_weight_decay * sumabs(output_weights);
00852     //regularization_penalty += output_weight_decay * sumabs(direct_weights); - doesn't change anything
00853     costs[0] = fit_error + regularization_penalty;
00854     costs[1] = fit_error;
00855     costs[2] = regularization_penalty;
00856     if (cost_type!=0) // classification type
00857     {
00858         int topscoring_class = argmax(output);
00859         int target_class = int(target[0]);
00860         costs[3] = (target_class!=topscoring_class); // 1 or 0
00861     }
00862 }                                
00863 
00864 TVec<string> IncrementalNNet::getTestCostNames() const
00865 {
00866     // Return the names of the costs computed by computeCostsFromOutputs
00867     // (these may or may not be exactly the same as what's returned by getTrainCostNames).
00868     if (output_cost_type=="squared_error") // regression-type
00869     {
00870         TVec<string> names(3);
00871         names[0]=output_cost_type+"+L1_regularization";
00872         names[1]=output_cost_type;
00873         names[2]="+L1_regularization";
00874         return names;
00875     }
00876     // else classification-type
00877     TVec<string> names(4);
00878     names[0]=output_cost_type+"+L1_regularization";
00879     names[1]=output_cost_type;
00880     names[2]="+L1_regularization";
00881     names[3]="class_error";
00882     return names;
00883 }
00884 
00885 TVec<string> IncrementalNNet::getTrainCostNames() const
00886 {
00887     // Return the names of the objective costs that the train method computes and 
00888     // for which it updates the VecStatsCollector train_stats
00889     // (these may or may not be exactly the same as what's returned by getTestCostNames).
00890     return getTestCostNames();
00891 }
00892 
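// Incremental-connection bookkeeping: each weight keeps a moving average of its gradient,
//   MAgradients[i] <- mac * gradient * input[i] + (1 - mac) * MAgradients[i],
// with mac = moving_average_coefficient. A weight that is currently zero is (re)activated
// once |MAgradients[i]| exceeds connection_gradient_threshold; an active weight is pruned
// once |weight| + |MAgradient| falls below connection_removing_threshold, and is otherwise
// updated by ordinary gradient descent.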
00893 void IncrementalNNet::update_incremental_connections( Vec weights, Vec MAgradients, const Vec& input, real gradient ) const{
00894     int n = weights.size();
00895     for ( int i = 0; i < n; i++ ) {
00896         MAgradients[i] = gradient * input[i]
00897             * moving_average_coefficient + (1-moving_average_coefficient)*MAgradients[i];
00898         if ( weights[i] == 0.0 ) {
00899             if ( fabs(MAgradients[i]) > connection_gradient_threshold ){ // add connection
00900                 //weights[i] = - 5 * learning_rate * MAgradients[i];
00901                 weights[i] -= gradient * input[i] * learning_rate;
00902             }
00903         } else {
00904             if ( fabs( weights[i] ) + fabs( MAgradients[i] ) < connection_removing_threshold ) 
00905                 weights[i] = 0.0;  // remove connection
00906             else 
00907                 weights[i] -= gradient * input[i] * learning_rate; // update connection
00908         }
00909     }
00910 }
00911 
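// Residual-correlation gradient (used when residual_correlation_gradient is true): rather
// than back-propagating the full output gradient, the hidden unit receives the gradient of
// the single output whose moving-average gradient has the largest magnitude (or the largest
// instantaneous gradient while the unit's activation is still zero), signed by the
// corresponding output weight; with a single output, output_gradient[0] is passed through.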
00912 void IncrementalNNet::residual_correlation_output_gradient( Vec MAgradients, const Vec& weights, const Vec& output_gradient, 
00913                                                             real activation, real& hidden_gradient ) const
00914 {
00915     int n = MAgradients.size();
00916     if ( n > 1 ){ // calculate candidate_unit_output_weight_gradients
00917         int max_gradient_index = 0;
00918         real max_gradient_value = -1.0;
00919         bool initial = ( activation == 0.0 );
00920         for ( int j = 0; j < n; j++ ) {
00921             MAgradients[j] = output_gradient[j] * activation 
00922                 * moving_average_coefficient +(1-moving_average_coefficient)*MAgradients[j];
00923             real gradient_abs = fabs( initial ? output_gradient[j] : MAgradients[j] );
00924             if ( gradient_abs > max_gradient_value ){ 
00925                 max_gradient_value = gradient_abs;
00926                 max_gradient_index = j;
00927             }
00928         }
00929         hidden_gradient = output_gradient[max_gradient_index] 
00930             * sign( weights[max_gradient_index] );              
00931     } else hidden_gradient = output_gradient[0];
00932 
00933 }
00934 
00935 } // end of namespace PLearn
00936 
00937 
00938 /*
00939   Local Variables:
00940   mode:c++
00941   c-basic-offset:4
00942   c-file-style:"stroustrup"
00943   c-file-offsets:((innamespace . 0)(inline-open . 0))
00944   indent-tabs-mode:nil
00945   fill-column:79
00946   End:
00947 */
00948 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :