PLearn 0.1
PLearn::IncrementalNNet Class Reference
#include <IncrementalNNet.h>
Public Member Functions | |
IncrementalNNet () | |
Default constructor. | |
virtual void | build () |
Simply calls inherited::build() then build_(). | |
virtual void | makeDeepCopyFromShallowCopy (CopiesMap &copies) |
Transforms a shallow copy into a deep copy. | |
virtual string | classname () const |
virtual OptionList & | getOptionList () const |
virtual OptionMap & | getOptionMap () const |
virtual RemoteMethodMap & | getRemoteMethodMap () const |
virtual IncrementalNNet * | deepCopy (CopiesMap &copies) const |
virtual int | outputsize () const |
Returns the size of this learner's output (which typically may depend on its inputsize(), targetsize() and set options). | |
virtual void | forget () |
(Re-)initializes the PLearner in its fresh state (that state may depend on the 'seed' option) and sets 'stage' back to 0 (this is the stage of a fresh learner!). | |
virtual void | train () |
The role of the train method is to bring the learner up to stage==nstages, updating the train_stats collector with training costs measured on-line in the process. | |
virtual void | computeOutput (const Vec &input, Vec &output) const |
Computes the output from the input. | |
virtual void | computeCostsFromOutputs (const Vec &input, const Vec &output, const Vec &target, Vec &costs) const |
Computes the costs from already computed output. | |
virtual TVec< std::string > | getTestCostNames () const |
Returns the names of the costs computed by computeCostsFromOutputs (and thus the test method). | |
virtual TVec< std::string > | getTrainCostNames () const |
Returns the names of the objective costs that the train method computes and for which it updates the VecStatsCollector train_stats. | |
virtual real | output_loss (const Vec &output, const Vec &target) const |
virtual void | output_loss_gradient (const Vec &output, const Vec &target, Vec output_gradient, real sampleweight) const |
void | update_incremental_connections (Vec weights, Vec MAgradients, const Vec &input, real gradient) const |
void | residual_correlation_output_gradient (Vec MAgradients, const Vec &weights, const Vec &output_gradient, real activation, real &hidden_gradient) const |
Static Public Member Functions | |
static string | _classname_ () |
static OptionList & | _getOptionList_ () |
static RemoteMethodMap & | _getRemoteMethodMap_ () |
static Object * | _new_instance_for_typemap_ () |
static bool | _isa_ (const Object *o) |
static void | _static_initialize_ () |
static const PPath & | declaringFile () |
Public Attributes | |
int | n_outputs |
real | output_weight_decay |
bool | online |
int | minibatch_size |
string | output_cost_type |
bool | boosting |
bool | minimize_local_cost |
bool | hard_activation_function |
bool | use_hinge_loss_for_hard_activation |
real | initial_learning_rate |
real | decay_factor |
real | max_n_epochs_to_fail |
real | rand_range |
bool | enable_internal_weights |
bool | incremental_connections |
real | connection_gradient_threshold |
real | connection_removing_threshold |
bool | residual_correlation_gradient |
Vec | linear_output |
Vec | act |
Vec | h |
int | cost_type |
Static Public Attributes | |
static StaticInitializer | _static_initializer_ |
Static Protected Member Functions | |
static void | declareOptions (OptionList &ol) |
Declares this class' options. | |
Protected Attributes | |
Mat | direct_weights |
Mat | direct_weight_gradients |
Mat | output_weights |
Mat | output_weight_gradients |
Vec | output_biases |
Mat | hidden_layer_weights |
Mat | hidden_layer_weight_gradients |
Vec | hidden_layer_biases |
TVec< Vec > | internal_weights |
TVec< Vec > | internal_weight_gradients |
Vec | candidate_unit_weights |
Vec | candidate_unit_weight_gradients |
real | candidate_unit_bias |
Vec | candidate_unit_output_weights |
Vec | candidate_unit_output_weight_gradients |
Vec | candidate_unit_internal_weights |
Vec | candidate_unit_internal_weight_gradients |
int | n_examples_seen |
real | current_average_cost |
real | next_average_cost |
int | n_examples_training_candidate |
int | current_example |
real | moving_average_coefficient |
real | learning_rate |
Private Types | |
typedef PLearner | inherited |
Private Member Functions | |
void | build_ () |
This does the actual building. |
Definition at line 51 of file IncrementalNNet.h.
typedef PLearner PLearn::IncrementalNNet::inherited [private] |
Reimplemented from PLearn::PLearner.
Definition at line 56 of file IncrementalNNet.h.
PLearn::IncrementalNNet::IncrementalNNet | ( | ) |
Default constructor.
Definition at line 49 of file IncrementalNNet.cc.
: internal_weights(0),
  internal_weight_gradients(0),
  candidate_unit_bias(0),
  n_examples_seen(0),
  current_average_cost(0),
  next_average_cost(0),
  n_examples_training_candidate(0),
  current_example(0),
  n_outputs(1),
  output_weight_decay(0),
  online(true),
  minibatch_size(0),
  output_cost_type("squared_error"),
  boosting(false),
  minimize_local_cost(false),
  hard_activation_function(false),
  use_hinge_loss_for_hard_activation(true),
  initial_learning_rate(0.01),
  decay_factor(1e-6),
  max_n_epochs_to_fail(1),
  rand_range(1),
  enable_internal_weights(false),
  incremental_connections(false),
  connection_gradient_threshold(0.5),
  connection_removing_threshold(0.0),
  residual_correlation_gradient(true)
{ }
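Since the build options listed in the Public Attributes section above are plain public data members, an IncrementalNNet can be configured directly in C++ by assigning them and then calling build(). The sketch below is illustrative only: the VMat training set, the PP<> smart-pointer idiom, setTrainingSet() and the inherited nstages option come from the general PLearner interface and are assumptions, not something documented on this page.

#include <IncrementalNNet.h>

using namespace PLearn;

// Illustrative sketch only; 'trainset' is assumed to be an existing VMat.
void train_incremental_nnet(VMat trainset)
{
    PP<IncrementalNNet> learner = new IncrementalNNet();

    // Build options (public attributes documented on this page).
    learner->n_outputs             = 3;                         // e.g. a 3-class problem
    learner->output_cost_type      = "discrete_log_likelihood";
    learner->output_weight_decay   = 1e-4;                      // L1 penalty on output weights
    learner->initial_learning_rate = 0.01;
    learner->decay_factor          = 1e-6;
    learner->nstages               = 20;                        // inherited PLearner option: at most 20 hidden units

    learner->build();                      // inherited::build() then build_()
    learner->setTrainingSet(trainset);     // assumed standard PLearner call
    learner->train();                      // grows hidden units until nstages or the stopping criterion

    Vec input(learner->inputsize());       // fill with a test example before use
    Vec output(learner->outputsize());
    learner->computeOutput(input, output); // forward pass through the grown network
}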
string PLearn::IncrementalNNet::_classname_ | ( | ) | [static] |
Reimplemented from PLearn::PLearner.
Definition at line 83 of file IncrementalNNet.cc.
OptionList & PLearn::IncrementalNNet::_getOptionList_ | ( | ) | [static] |
Reimplemented from PLearn::PLearner.
Definition at line 83 of file IncrementalNNet.cc.
RemoteMethodMap & PLearn::IncrementalNNet::_getRemoteMethodMap_ | ( | ) | [static] |
Reimplemented from PLearn::PLearner.
Definition at line 83 of file IncrementalNNet.cc.
bool PLearn::IncrementalNNet::_isa_ | ( | const Object * | o | ) | [static] |
Reimplemented from PLearn::PLearner.
Definition at line 83 of file IncrementalNNet.cc.
Object * PLearn::IncrementalNNet::_new_instance_for_typemap_ | ( | ) | [static] |
Reimplemented from PLearn::Object.
Definition at line 83 of file IncrementalNNet.cc.
void PLearn::IncrementalNNet::_static_initialize_ | ( | ) | [static] |
Reimplemented from PLearn::PLearner.
Definition at line 83 of file IncrementalNNet.cc.
void PLearn::IncrementalNNet::build | ( | ) | [virtual] |
Simply calls inherited::build() then build_().
Reimplemented from PLearn::PLearner.
Definition at line 287 of file IncrementalNNet.cc.
References PLearn::PLearner::build(), and build_().
{ inherited::build(); build_(); }
void PLearn::IncrementalNNet::build_ | ( | ) | [private] |
This does the actual building.
Reimplemented from PLearn::PLearner.
Definition at line 241 of file IncrementalNNet.cc.
References act, candidate_unit_internal_weight_gradients, candidate_unit_internal_weights, candidate_unit_output_weight_gradients, candidate_unit_output_weights, candidate_unit_weight_gradients, candidate_unit_weights, cost_type, direct_weight_gradients, direct_weights, enable_internal_weights, h, hidden_layer_biases, hidden_layer_weight_gradients, hidden_layer_weights, incremental_connections, PLearn::PLearner::inputsize_, internal_weight_gradients, internal_weights, linear_output, n_outputs, output_biases, output_cost_type, output_weight_gradients, output_weights, PLERROR, residual_correlation_gradient, PLearn::TVec< T >::resize(), PLearn::TMat< T >::resize(), PLearn::PLearner::stage, and PLearn::PLearner::train_set.
Referenced by build(), and forget().
{
    if (output_cost_type=="squared_error")
        cost_type=0;
    else if (output_cost_type=="hinge_loss")
        cost_type=1;
    else if (output_cost_type=="discrete_log_likelihood")
        cost_type=2;
    else
        PLERROR("IncrementalNNet:build: output_cost_type should either be 'squared_error', 'hinge_loss', or 'discrete_log_likelihood'");

    if(!train_set) return;

    direct_weights.resize(n_outputs,inputsize_);
    output_weights.resize(stage,n_outputs);
    output_biases.resize(n_outputs);
    hidden_layer_weights.resize(stage,inputsize_);
    hidden_layer_biases.resize(stage);
    linear_output.resize(n_outputs);
    act.resize(stage);
    h.resize(stage);
    candidate_unit_output_weights.resize(n_outputs);
    candidate_unit_weights.resize(inputsize_);
    if ( enable_internal_weights ) {
        internal_weights.resize(stage); //.clear();
        candidate_unit_internal_weights.resize(stage);
    }
    if ( incremental_connections ) {
        direct_weight_gradients.resize(n_outputs,inputsize_);
        hidden_layer_weight_gradients.resize(stage,inputsize_);
        candidate_unit_weight_gradients.resize(inputsize_);
        if ( enable_internal_weights ) {
            internal_weight_gradients.resize(stage);
            candidate_unit_internal_weight_gradients.resize(stage);
        }
    }
    if ( residual_correlation_gradient & n_outputs > 1 ) {
        output_weight_gradients.resize(stage,n_outputs);
        candidate_unit_output_weight_gradients.resize(n_outputs);
    }
}
string PLearn::IncrementalNNet::classname | ( | ) | const [virtual] |
Reimplemented from PLearn::Object.
Definition at line 83 of file IncrementalNNet.cc.
void PLearn::IncrementalNNet::computeCostsFromOutputs | ( | const Vec & | input, |
const Vec & | output, | ||
const Vec & | target, | ||
Vec & | costs | ||
) | const [virtual] |
Computes the costs from already computed output.
Implements PLearn::PLearner.
Definition at line 846 of file IncrementalNNet.cc.
References PLearn::argmax(), cost_type, output_loss(), output_weight_decay, output_weights, and PLearn::sumabs().
Referenced by train().
{
    // Compute the costs from *already* computed output.
    real fit_error = output_loss(output,target);
    real regularization_penalty = output_weight_decay * sumabs(output_weights);
    //regularization_penalty += output_weight_decay * sumabs(direct_weights); - doesn't change anything
    costs[0] = fit_error + regularization_penalty;
    costs[1] = fit_error;
    costs[2] = regularization_penalty;
    if (cost_type!=0) // classification type
    {
        int topscoring_class = argmax(output);
        int target_class = int(target[0]);
        costs[3] = (target_class!=topscoring_class); // 1 or 0
    }
}
Computes the output from the input.
Reimplemented from PLearn::PLearner.
Definition at line 771 of file IncrementalNNet.cc.
References act, PLearn::TVec< T >::clear(), PLearn::compute_sign(), PLearn::compute_tanh(), cost_type, enable_internal_weights, h, hard_activation_function, hidden_layer_biases, hidden_layer_weights, i, internal_weights, j, linear_output, output_biases, output_weights, outputsize(), PLearn::product(), PLearn::TVec< T >::resize(), PLearn::sign(), PLearn::softmax(), PLearn::PLearner::stage, PLearn::tanh(), and PLearn::transposeProduct().
Referenced by train().
{
    // Compute the output from the input.
    int nout = outputsize();
    output.resize(nout);
    if (stage>0)
    {
        product( act, hidden_layer_weights, input );
        act += hidden_layer_biases;
        if ( enable_internal_weights ) { // cascade topology
            for( int i = 0; i < stage; i++ ) {
                h[i] = hard_activation_function ? sign( act[i] ) : tanh( act[i] );
                for( int j = i+1; j < stage; j++ ) {
                    act[j] += h[i] * internal_weights[j][i];
                }
            }
        } else { // simple one-hidden-layer topology
            if (hard_activation_function)
                compute_sign(act,h);
            else
                compute_tanh(act,h);
        }
        transposeProduct(linear_output,output_weights,h);
    }
    else
        linear_output.clear();
    linear_output+=output_biases;
    if (cost_type==2) // "discrete_log_likelihood"
        softmax(linear_output,output);
    else
        output << linear_output;
}
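The method above relies on PLearn's Vec/Mat helpers (product(), transposeProduct(), compute_tanh(), softmax()). As a reading aid, here is a standalone sketch of the same forward pass for the plain one-hidden-layer case (tanh activations, no internal cascade weights, no direct input-to-output connections). All names in it are local to the example; the weight layout mirrors the transposed convention noted in the option descriptions.

#include <vector>
#include <cmath>
#include <algorithm>

// Illustrative re-implementation of the one-hidden-layer forward pass.
// hidden_w is [n_hidden][n_inputs]; output_w is [n_hidden][n_outputs]
// (transposed layout, as for output_weights above).
std::vector<double> forward(const std::vector<double>& input,
                            const std::vector<std::vector<double> >& hidden_w,
                            const std::vector<double>& hidden_b,
                            const std::vector<std::vector<double> >& output_w,
                            const std::vector<double>& output_b,
                            bool use_softmax /* true for the discrete_log_likelihood cost */)
{
    const size_t n_hidden  = hidden_w.size();
    const size_t n_outputs = output_b.size();

    // act = hidden_layer_weights * input + hidden_layer_biases;  h = tanh(act)
    std::vector<double> h(n_hidden);
    for (size_t i = 0; i < n_hidden; ++i) {
        double act = hidden_b[i];
        for (size_t k = 0; k < input.size(); ++k)
            act += hidden_w[i][k] * input[k];
        h[i] = std::tanh(act);
    }

    // linear_output = output_weights^T * h + output_biases
    std::vector<double> out(output_b);
    for (size_t i = 0; i < n_hidden; ++i)
        for (size_t j = 0; j < n_outputs; ++j)
            out[j] += output_w[i][j] * h[i];

    if (use_softmax) {
        double mx = out[0], z = 0.0;
        for (size_t j = 1; j < n_outputs; ++j) mx = std::max(mx, out[j]);
        for (size_t j = 0; j < n_outputs; ++j) { out[j] = std::exp(out[j] - mx); z += out[j]; }
        for (size_t j = 0; j < n_outputs; ++j) out[j] /= z;
    }
    return out;
}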
void PLearn::IncrementalNNet::declareOptions | ( | OptionList & | ol | ) | [static, protected] |
Declares this class' options.
Reimplemented from PLearn::PLearner.
Definition at line 85 of file IncrementalNNet.cc.
References boosting, PLearn::OptionBase::buildoption, candidate_unit_bias, candidate_unit_internal_weight_gradients, candidate_unit_internal_weights, candidate_unit_output_weight_gradients, candidate_unit_output_weights, candidate_unit_weight_gradients, candidate_unit_weights, connection_gradient_threshold, connection_removing_threshold, current_average_cost, decay_factor, PLearn::declareOption(), PLearn::PLearner::declareOptions(), direct_weight_gradients, direct_weights, enable_internal_weights, hard_activation_function, hidden_layer_biases, hidden_layer_weight_gradients, hidden_layer_weights, incremental_connections, initial_learning_rate, internal_weight_gradients, internal_weights, PLearn::OptionBase::learntoption, max_n_epochs_to_fail, minibatch_size, minimize_local_cost, n_examples_seen, n_examples_training_candidate, n_outputs, next_average_cost, online, output_biases, output_cost_type, output_weight_decay, output_weight_gradients, output_weights, rand_range, residual_correlation_gradient, and use_hinge_loss_for_hard_activation.
{ declareOption(ol, "n_outputs", &IncrementalNNet::n_outputs, OptionBase::buildoption, "Number of output units. Must be coherent with output_cost_type and targetsize:\n" "n_outputs==targetsize for 'squared_error', and targetsize==1 && n_outputs==n_classes for\n" "hinge_loss and discrete_log_likelihood.\n"); declareOption(ol, "output_weight_decay", &IncrementalNNet::output_weight_decay, OptionBase::buildoption, "L1 regularizer's penalty factor on output weights."); declareOption(ol, "online", &IncrementalNNet::online, OptionBase::buildoption, "use online or batch version? only consider adding a hidden unit after minibatch_size examples\n" "Add a hidden unit only if it would reduce the average cost (including the L1 penalty).\n" "This current_average_cost is calculated either with a moving average over a moving target (online version)\n" "or the algorithm proceeds in two phases (batch version): on even batches one improves the\n" "tentative hidden unit, while on odd batches one evaluates its quality.\n"); declareOption(ol, "minibatch_size", &IncrementalNNet::minibatch_size, OptionBase::buildoption, "0 is a special value meaning minibatch_size == training set size.\n" "After a hidden unit is added, wait at least that number of examples before considering\n" "to add a new one.\n"); declareOption(ol, "output_cost_type", &IncrementalNNet::output_cost_type, OptionBase::buildoption, "'squared_error', 'hinge_loss', 'discrete_log_likelihood' (for probabilistic classification).\n"); declareOption(ol, "boosting", &IncrementalNNet::boosting, OptionBase::buildoption, "use a boosting-like approach (only works with online=false) and train the new hidden unit \n" "but not the previous ones; also descend not the actual cost but a weighted cost obtained\n" "from the gradient of the output cost on the hidden unit function (see minimize_local_cost option).\n"); declareOption(ol, "minimize_local_cost", &IncrementalNNet::minimize_local_cost, OptionBase::buildoption, "if true then instead of minimize global cost sum_t Q(f(x_t),y_t),\n" "each hidden unit minimizes sum_t Q'(f(x_t),y_t) h(x_t)\n" "or some approximation of it if h is a hard threshold (weighted logistic regression cost\n" "with targets sign(Q'(f(x_t),y_t)) and weights |Q'(f(x_t),y_t)|),\n" "where Q is the output cost, f(x_t) is the current prediction, y_t the target, h(x_t) the\n" "output of the new hidden unit.\n"); declareOption(ol, "hard_activation_function", &IncrementalNNet::hard_activation_function, OptionBase::buildoption, "if true then h(x) = sign(w'x + b), else h(x) = tanh(w'x + b).\n"); declareOption(ol, "use_hinge_loss_for_hard_activation", &IncrementalNNet::use_hinge_loss_for_hard_activation, OptionBase::buildoption, "use hinge loss or cross-entropy to train hidden units, when hard_activation_function\n"); declareOption(ol, "initial_learning_rate", &IncrementalNNet::initial_learning_rate, OptionBase::buildoption, "learning_rate = initial_learning_rate / (1 + n_examples_seen * decay_factor).\n"); declareOption(ol, "decay_factor", &IncrementalNNet::decay_factor, OptionBase::buildoption, "decay factor in learning_rate formula.\n"); declareOption(ol, "max_n_epochs_to_fail", &IncrementalNNet::max_n_epochs_to_fail, OptionBase::buildoption, "Maximum number of epochs (not necessarily an integer) to try improving the new hidden unit\n" "before declaring failure to improve the regularized cost (and thus stopping training).\n"); declareOption(ol, "rand_range", &IncrementalNNet::rand_range, OptionBase::buildoption, "Interval of random numbers when 
initializing weights/biases: (-rand_range/2, rand_range/2).\n"); declareOption(ol, "enable_internal_weights", &IncrementalNNet::enable_internal_weights, OptionBase::buildoption, "Network has a cascade topology (each hidden unit has connections to all previous ones) if true,\n" "or a one hidden layer topology if false (default).\n"); declareOption(ol, "incremental_connections", &IncrementalNNet::incremental_connections, OptionBase::buildoption, "Add hidden connections incrementally if true, or all at once with a new unit if false (default).\n" "This option is only supported with n_outputs == 1." ); declareOption(ol, "connection_gradient_threshold", &IncrementalNNet::connection_gradient_threshold, OptionBase::buildoption, "Threshold of gradient for connection to be added, when incremental_connections == true." ); declareOption(ol, "connection_removing_threshold", &IncrementalNNet::connection_removing_threshold, OptionBase::buildoption, "Connections are removed for which |weight|+|MAgradient| < connection_removing_threshold.\n" "Default value is 0 (connections are not removed). Ednabled by incremental_connections." ); declareOption(ol, "residual_correlation_gradient", &IncrementalNNet::residual_correlation_gradient, OptionBase::buildoption, "Use residual correlation gradient (ConvexNN) if true (default), or classical error back-propagation if false." ); //declareOption(ol, "", &IncrementalNNet::, OptionBase::buildoption, declareOption(ol, "direct_weights", &IncrementalNNet::direct_weights, OptionBase::learntoption, "matrix of direct [output, input] weights.\n"); declareOption(ol, "direct_weight_gradients", &IncrementalNNet::direct_weight_gradients, OptionBase::learntoption, "Moving average gradients on matrix of direct [output, input] weights.\n"); declareOption(ol, "output_weights", &IncrementalNNet::output_weights, OptionBase::learntoption, "matrix of [hidden_unit, output] output weights.\n" "** NOTE IT IS TRANSPOSED ** with respect to\n" "the 'natural' index order, so as to easily add hidden units.\n"); declareOption(ol, "output_weight_gradients", &IncrementalNNet::output_weight_gradients, OptionBase::learntoption, "Moving average gradients on matrix of [hidden_unit, output] output weights\n" "(enabled by residual_correlation_gradient && outputsize() > 1).\n" "** NOTE IT IS TRANSPOSED ** with respect to\n" "the 'natural' index order, so as to easily add hidden units.\n"); declareOption(ol, "output_biases", &IncrementalNNet::output_biases, OptionBase::learntoption, "vector of output biases\n"); declareOption(ol, "hidden_layer_weights", &IncrementalNNet::hidden_layer_weights, OptionBase::learntoption, "matrix of weights from input to hidden units: [hidden_unit, input].\n"); declareOption(ol, "hidden_layer_weight_gradients", &IncrementalNNet::hidden_layer_weight_gradients, OptionBase::learntoption, "Moving average gradients on hidden_layer_weights (enabled by incremental_connections).\n"); declareOption(ol, "internal_weights", &IncrementalNNet::internal_weights, OptionBase::learntoption, "weights among hidden units [to, from] in cascade architecture (enabled by enable_internal_weights).\n"); declareOption(ol, "internal_weight_gradients", &IncrementalNNet::internal_weight_gradients, OptionBase::learntoption, "Moving average gradients on internal_weights (enabled by incremental_connections).\n"); declareOption(ol, "hidden_layer_biases", &IncrementalNNet::hidden_layer_biases, OptionBase::learntoption, "vector of biases of the hidden units.\n"); declareOption(ol, "candidate_unit_weights", 
&IncrementalNNet::candidate_unit_weights, OptionBase::learntoption, "vector of weights from input to next candidate hidden unit.\n"); declareOption(ol, "candidate_unit_weight_gradients", &IncrementalNNet::candidate_unit_weight_gradients, OptionBase::learntoption, "Moving average gradients on candidate_unit_weights (enabled by incremental_connections).\n"); declareOption(ol, "candidate_unit_bias", &IncrementalNNet::candidate_unit_bias, OptionBase::learntoption, "bias parameter of next candidate hidden unit.\n"); declareOption(ol, "candidate_unit_output_weights", &IncrementalNNet::candidate_unit_output_weights, OptionBase::learntoption, "vector of weights from next candidate hidden unit to outputs.\n"); declareOption(ol, "candidate_unit_output_weight_gradients", &IncrementalNNet::candidate_unit_output_weight_gradients, OptionBase::learntoption, "Moving average gradients on vector of weights from next candidate hidden unit to outputs.\n" "(enabled by residual_correlation_gradient && outputsize() > 1).\n"); declareOption(ol, "candidate_unit_internal_weights", &IncrementalNNet::candidate_unit_internal_weights, OptionBase::learntoption, "vector of weights from previous hidden units to the candidate unit (enabled by enable_internal_weights).\n"); declareOption(ol, "candidate_unit_internal_weight_gradients", &IncrementalNNet::candidate_unit_internal_weight_gradients, OptionBase::learntoption, "Moving average gradients on candidate_unit_internal_weights (enabled by incremental_connections).\n"); declareOption(ol, "n_examples_seen", &IncrementalNNet::n_examples_seen, OptionBase::learntoption, "number of training examples seen (= number of updates done) seen beginning of training.\n"); declareOption(ol, "current_average_cost", &IncrementalNNet::current_average_cost, OptionBase::learntoption, "current average cost, including fitting and regularization terms. It is computed\n" "differently according to the online and minibatch_size options.\n"); declareOption(ol, "next_average_cost", &IncrementalNNet::next_average_cost, OptionBase::learntoption, "average cost if candidate hidden unit was included. It is computed like current_average_cost.\n"); declareOption(ol, "n_examples_training_candidate", &IncrementalNNet::n_examples_training_candidate, OptionBase::learntoption, "number of examples seen since started to train current candidate hidden unit. Used in\n" "stopping criterion: stop when n_examples_training_candidate >= max_n_epochs_to_fail * train_set->length().\n"); // Now call the parent class' declareOptions inherited::declareOptions(ol); }
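A note on the learning-rate options declared above: train() uses the schedule learning_rate = initial_learning_rate / (1 + n_examples_seen * decay_factor). The small standalone program below simply evaluates that formula with the constructor defaults (0.01 and 1e-6) to make the decay concrete; it is an illustration, not part of the class.

#include <cstdio>

// Illustrative: the stochastic-gradient step size used by train(),
// as a function of the number of examples seen so far.
double incremental_nnet_learning_rate(double initial_learning_rate,
                                      double decay_factor,
                                      long n_examples_seen)
{
    return initial_learning_rate / (1.0 + n_examples_seen * decay_factor);
}

int main()
{
    // With the defaults, the step size is halved after one million updates.
    for (long t = 0; t <= 2000000; t += 500000)
        std::printf("n_examples_seen=%ld  learning_rate=%g\n",
                    t, incremental_nnet_learning_rate(0.01, 1e-6, t));
    return 0;
}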
static const PPath& PLearn::IncrementalNNet::declaringFile | ( | ) | [inline, static] |
Reimplemented from PLearn::PLearner.
Definition at line 182 of file IncrementalNNet.h.
IncrementalNNet * PLearn::IncrementalNNet::deepCopy | ( | CopiesMap & | copies | ) | const [virtual] |
Reimplemented from PLearn::PLearner.
Definition at line 83 of file IncrementalNNet.cc.
void PLearn::IncrementalNNet::forget | ( | ) | [virtual] |
(Re-)initializes the PLearner in its fresh state (that state may depend on the 'seed' option) and sets 'stage' back to 0 (this is the stage of a fresh learner!).
Reimplemented from PLearn::PLearner.
Definition at line 325 of file IncrementalNNet.cc.
References build_(), candidate_unit_bias, candidate_unit_output_weight_gradients, candidate_unit_output_weights, candidate_unit_weight_gradients, candidate_unit_weights, current_average_cost, current_example, direct_weight_gradients, direct_weights, PLearn::TMat< T >::fill(), PLearn::TVec< T >::fill(), i, incremental_connections, PLearn::PLearner::inputsize_, n_examples_seen, n_outputs, next_average_cost, rand_range, residual_correlation_gradient, and PLearn::PLearner::stage.
Referenced by train().
{
    // reset the number of hidden units to 0 = stage
    stage=0;
    n_examples_seen=0;
    current_average_cost=0;
    next_average_cost=0;
    current_example=0;
    // resize all the matrices, vectors with stage=0
    build_();
    candidate_unit_output_weights.fill(0.1);
    candidate_unit_bias = ((real)rand()/RAND_MAX - 0.5)*rand_range;
    if (!incremental_connections) {
        for( int i=0; i < inputsize_; i++ )
            candidate_unit_weights[i] = ((real)rand()/RAND_MAX - 0.5)*rand_range;
    } else {
        direct_weights.fill(0.0);
        direct_weight_gradients.fill(0.0);
        candidate_unit_weights.fill(0.0);
        candidate_unit_weight_gradients.fill(0.0);
    }
    if ( residual_correlation_gradient && n_outputs > 1 ){
        candidate_unit_output_weight_gradients.fill(0.0);
    }
}
OptionList & PLearn::IncrementalNNet::getOptionList | ( | ) | const [virtual] |
Reimplemented from PLearn::Object.
Definition at line 83 of file IncrementalNNet.cc.
OptionMap & PLearn::IncrementalNNet::getOptionMap | ( | ) | const [virtual] |
Reimplemented from PLearn::Object.
Definition at line 83 of file IncrementalNNet.cc.
RemoteMethodMap & PLearn::IncrementalNNet::getRemoteMethodMap | ( | ) | const [virtual] |
Reimplemented from PLearn::Object.
Definition at line 83 of file IncrementalNNet.cc.
TVec< string > PLearn::IncrementalNNet::getTestCostNames | ( | ) | const [virtual] |
Returns the names of the costs computed by computeCostsFromOutputs (and thus the test method).
Implements PLearn::PLearner.
Definition at line 864 of file IncrementalNNet.cc.
References output_cost_type.
Referenced by getTrainCostNames().
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by getTrainCostNames).
    if (output_cost_type=="squared_error") // regression-type
    {
        TVec<string> names(3);
        names[0]=output_cost_type+"+L1_regularization";
        names[1]=output_cost_type;
        names[2]="+L1_regularization";
        return names;
    }
    // else classification-type
    TVec<string> names(4);
    names[0]=output_cost_type+"+L1_regularization";
    names[1]=output_cost_type;
    names[2]="+L1_regularization";
    names[3]="class_error";
    return names;
}
TVec< string > PLearn::IncrementalNNet::getTrainCostNames | ( | ) | const [virtual] |
Returns the names of the objective costs that the train method computes and for which it updates the VecStatsCollector train_stats.
Implements PLearn::PLearner.
Definition at line 885 of file IncrementalNNet.cc.
References getTestCostNames().
{
    // Return the names of the objective costs that the train method computes and
    // for which it updates the VecStatsCollector train_stats
    // (these may or may not be exactly the same as what's returned by getTestCostNames).
    return getTestCostNames();
}
void PLearn::IncrementalNNet::makeDeepCopyFromShallowCopy | ( | CopiesMap & | copies | ) | [virtual] |
Transforms a shallow copy into a deep copy.
Reimplemented from PLearn::PLearner.
Definition at line 294 of file IncrementalNNet.cc.
References act, candidate_unit_internal_weight_gradients, candidate_unit_internal_weights, candidate_unit_output_weight_gradients, candidate_unit_output_weights, candidate_unit_weight_gradients, candidate_unit_weights, PLearn::deepCopyField(), direct_weight_gradients, direct_weights, h, hidden_layer_biases, hidden_layer_weight_gradients, hidden_layer_weights, internal_weight_gradients, internal_weights, linear_output, PLearn::PLearner::makeDeepCopyFromShallowCopy(), output_biases, output_weight_gradients, and output_weights.
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(direct_weights, copies);
    deepCopyField(direct_weight_gradients, copies);
    deepCopyField(output_weights, copies);
    deepCopyField(output_weight_gradients, copies);
    deepCopyField(output_biases, copies);
    deepCopyField(hidden_layer_weights, copies);
    deepCopyField(hidden_layer_weight_gradients, copies);
    deepCopyField(hidden_layer_biases, copies);
    deepCopyField(internal_weights, copies);
    deepCopyField(internal_weight_gradients, copies);
    deepCopyField(candidate_unit_weights, copies);
    deepCopyField(candidate_unit_weight_gradients, copies);
    deepCopyField(candidate_unit_output_weights, copies);
    deepCopyField(candidate_unit_output_weight_gradients, copies);
    deepCopyField(candidate_unit_internal_weights, copies);
    deepCopyField(candidate_unit_internal_weight_gradients, copies);
    deepCopyField(act, copies);
    deepCopyField(h, copies);
    deepCopyField(linear_output, copies);
}
real PLearn::IncrementalNNet::output_loss | ( | const Vec & | output, |
const Vec & | target | ||
) | const [virtual] |
Definition at line 804 of file IncrementalNNet.cc.
References cost_type, PLearn::one_against_all_hinge_loss(), PLearn::powdistance(), and PLearn::safelog().
Referenced by computeCostsFromOutputs(), and train().
{
    real fit_error=0;
    if (cost_type == 0) // "squared_error"
        fit_error = powdistance(output,target);
    else
    {
        int target_class = int(target[0]);
        if (cost_type == 1) // "hinge_loss", one against all binary classifiers
            fit_error = one_against_all_hinge_loss(output,target_class);
        else // (output_cost_type == "discrete_log_likelihood")
            fit_error = - safelog(output[target_class]); // - sum safelog(1-the_rest_of_the_output)?
    }
    return fit_error;
}
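The three cost types correspond to familiar per-example losses: squared Euclidean distance, a one-against-all hinge loss, and the negative log-likelihood of the target class. The standalone helpers below illustrate them with plain std::vector; the exact margin convention of PLearn::one_against_all_hinge_loss is defined elsewhere, so the hinge variant shown here (margin +1 required for the target class, -1 for the others) is an assumption about that convention.

#include <vector>
#include <cmath>
#include <algorithm>

// Illustrative versions of the three output costs for a single example.

double squared_error(const std::vector<double>& output, const std::vector<double>& target)
{
    double d = 0;                                 // powdistance(output, target)
    for (size_t i = 0; i < output.size(); ++i)
        d += (output[i] - target[i]) * (output[i] - target[i]);
    return d;
}

double one_against_all_hinge(const std::vector<double>& output, int target_class)
{
    double loss = 0;                              // assumed convention: margin +1 for the target
    for (size_t i = 0; i < output.size(); ++i) {  // class, margin -1 for every other class
        double y = (int(i) == target_class) ? 1.0 : -1.0;
        loss += std::max(0.0, 1.0 - y * output[i]);
    }
    return loss;
}

double discrete_log_likelihood_loss(const std::vector<double>& output, int target_class)
{
    return -std::log(std::max(output[target_class], 1e-300)); // safelog-style guard against log(0)
}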
void PLearn::IncrementalNNet::output_loss_gradient | ( | const Vec & | output, |
const Vec & | target, | ||
Vec | output_gradient, | ||
real | sampleweight | ||
) | const [virtual] |
Definition at line 819 of file IncrementalNNet.cc.
References cost_type, i, n_outputs, PLearn::one_against_all_hinge_loss_bprop(), and PLearn::substract().
Referenced by train().
{
    if (cost_type==0) // "squared_error"
    {
        substract(output,target,output_gradient);
        output_gradient *= sampleweight * 2;
        return;
    }
    int target_class = int(target[0]);
    if (cost_type==1) // "hinge_loss"
    {
        one_against_all_hinge_loss_bprop(output,target_class, output_gradient);
        if (sampleweight!=1)
            output_gradient *= sampleweight;
    }
    else // (output_cost_type=="discrete_log_likelihood")
    {
        for (int i=0;i<n_outputs;i++)
        {
            real y_i = (target_class==i)?1:0;
            output_gradient[i] = sampleweight*(output[i] - y_i);
        }
    }
}
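For the discrete_log_likelihood case, the loop above is the usual softmax negative-log-likelihood gradient: computeOutput() has already applied softmax, so the gradient with respect to each linear output is output[i] minus the one-hot target, scaled by the sample weight. The standalone snippet below evaluates that expression on a tiny example to show the values the loop produces.

#include <cstdio>
#include <cmath>

int main()
{
    const int n = 3, target_class = 1;
    double linear_output[n] = {0.2, 1.0, -0.5};
    double p[n], z = 0;
    for (int i = 0; i < n; ++i) { p[i] = std::exp(linear_output[i]); z += p[i]; } // softmax
    for (int i = 0; i < n; ++i) p[i] /= z;
    for (int i = 0; i < n; ++i) {
        double y_i = (i == target_class) ? 1.0 : 0.0;
        std::printf("output_gradient[%d] = %g\n", i, p[i] - y_i); // matches the loop above (sampleweight = 1)
    }
    return 0;
}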
int PLearn::IncrementalNNet::outputsize | ( | ) | const [virtual] |
Returns the size of this learner's output (which typically may depend on its inputsize(), targetsize() and set options).
Implements PLearn::PLearner.
Definition at line 320 of file IncrementalNNet.cc.
References n_outputs.
Referenced by computeOutput().
{ return n_outputs; }
void PLearn::IncrementalNNet::residual_correlation_output_gradient | ( | Vec | MAgradients, |
const Vec & | weights, | ||
const Vec & | output_gradient, | ||
real | activation, | ||
real & | hidden_gradient | ||
) | const |
Definition at line 912 of file IncrementalNNet.cc.
References j, moving_average_coefficient, n, PLearn::sign(), and PLearn::TVec< T >::size().
Referenced by train().
{
    int n = MAgradients.size();
    if ( n > 1 ){
        // calculate candidate_unit_output_weight_gradients
        int max_gradient_index = 0;
        real max_gradient_value = -1.0;
        bool initial = ( activation == 0.0 );
        for ( int j = 0; j < n; j++ ) {
            MAgradients[j] = output_gradient[j] * activation * moving_average_coefficient
                             +(1-moving_average_coefficient)*MAgradients[j];
            real gradient_abs = fabs( initial ? output_gradient[j] : MAgradients[j] );
            if ( gradient_abs > max_gradient_value ){
                max_gradient_value = gradient_abs;
                max_gradient_index = j;
            }
        }
        hidden_gradient = output_gradient[max_gradient_index] * sign( weights[max_gradient_index] );
    }
    else
        hidden_gradient = output_gradient[0];
}
void PLearn::IncrementalNNet::train | ( | ) | [virtual] |
The role of the train method is to bring the learner up to stage==nstages, updating the train_stats collector with training costs measured on-line in the process.
Implements PLearn::PLearner.
Definition at line 352 of file IncrementalNNet.cc.
References act, boosting, PLearn::bprop_tanh(), candidate_unit_bias, candidate_unit_internal_weight_gradients, candidate_unit_internal_weights, candidate_unit_output_weight_gradients, candidate_unit_output_weights, candidate_unit_weight_gradients, candidate_unit_weights, computeCostsFromOutputs(), computeOutput(), cost_type, current_average_cost, current_example, PLearn::d_hinge_loss(), decay_factor, direct_weight_gradients, direct_weights, PLearn::dot(), enable_internal_weights, PLearn::endl(), PLearn::externalProductAcc(), PLearn::TVec< T >::fill(), forget(), PLearn::VMat::getExample(), h, hard_activation_function, hidden_layer_biases, hidden_layer_weight_gradients, hidden_layer_weights, i, incremental_connections, initial_learning_rate, PLearn::PLearner::inputsize(), internal_weight_gradients, internal_weights, j, PLearn::layerBpropUpdate(), learning_rate, PLearn::TVec< T >::length(), PLearn::VMat::length(), linear_output, max_n_epochs_to_fail, minibatch_size, moving_average_coefficient, PLearn::multiplyAcc(), PLearn::multiplyAdd(), n_examples_seen, n_examples_training_candidate, n_outputs, next_average_cost, PLearn::PLearner::nstages, PLearn::PLearner::nTrainCosts(), online, output_biases, output_cost_type, output_loss(), output_loss_gradient(), output_weight_decay, output_weight_gradients, output_weights, PLERROR, rand_range, residual_correlation_gradient, residual_correlation_output_gradient(), PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), PLearn::sigmoid(), PLearn::sign(), PLearn::softmax(), PLearn::PLearner::stage, PLearn::sumabs(), PLearn::tanh(), PLearn::PLearner::targetsize(), PLearn::TVec< T >::toMat(), PLearn::PLearner::train_set, PLearn::PLearner::train_stats, PLearn::transposedLayerL1BpropUpdate(), update_incremental_connections(), use_hinge_loss_for_hard_activation, and PLearn::PLearner::verbosity.
{ // The role of the train method is to bring the learner up to stage==nstages, // updating train_stats with training costs measured on-line in the process. if (!train_set) PLERROR("IncrementalNNet::train train_set must be set before calling train\n"); if (output_cost_type == "squared_error" && train_set->targetsize() != n_outputs) PLERROR("IncrementalNNet::train with 'squared_error' output_cost_type, train_set->targetsize(%d) should equal n_outputs(%d)", train_set->targetsize(),n_outputs); if ((output_cost_type == "hinge_loss" || output_cost_type == "discrete_log_likelihood") && train_set->targetsize()!=1) PLERROR("IncrementalNNet::train 'hinge_loss' or 'discrete_log_likelihood' output_cost_type is for classification, train_set->targetsize(%d) should be 1", train_set->targetsize()); // if ( incremental_connections && n_outputs != 1 ) // PLERROR("IncrementalNNet::train incremental_connections is only supported with n_outputs == 1\n"); int minibatchsize = minibatch_size; if (minibatch_size == 0) minibatchsize = train_set->length(); real current_average_class_error=0; real next_average_class_error=0; real old_current_average_cost; real old_next_average_cost; static Vec input; // static so we don't reallocate/deallocate memory each time... static Vec output; static Vec target; // (but be careful that static means shared!) static Vec train_costs; static Vec costs_with_candidate; static Vec output_gradient; static Vec hidden_gradient; static Vec output_with_candidate; static Vec output_gradient_with_candidate; static Vec output_with_signchange; static Mat candidate_unit_output_weights_mat; static Vec candidate_h_vec; static Vec candidate_hidden_gradient; static Vec linear_output_with_candidate; int nc=nTrainCosts(); train_costs.resize(nc); costs_with_candidate.resize(nc); input.resize(inputsize()); // the train_set's inputsize() output.resize(n_outputs); output_gradient.resize(n_outputs); hidden_gradient.resize(stage); output_with_candidate.resize(n_outputs); output_gradient_with_candidate.resize(n_outputs); output_with_signchange.resize(n_outputs); target.resize(targetsize()); // the train_set's targetsize() candidate_unit_output_weights_mat = candidate_unit_output_weights.toMat(n_outputs,1); candidate_h_vec.resize(1); candidate_hidden_gradient.resize(1); linear_output_with_candidate.resize(n_outputs); real sampleweight; // the train_set's weight on the current example if(!train_stats) // make a default stats collector, in case there's none train_stats = new VecStatsCollector(); if(nstages<stage) // asking to revert to a previous stage! forget(); // reset the learner to stage=0 bool stopping_criterion_not_met = true; moving_average_coefficient = 1.0/minibatchsize; learning_rate = initial_learning_rate; while(stage<nstages && stopping_criterion_not_met) { // clear statistics of previous epoch train_stats->forget() ; // iterate through the data for some time... 
do { // compute output and cost train_set->getExample(current_example, input, target, sampleweight); current_example++; if (current_example==train_set->length()) current_example=0; computeOutput(input,output); computeCostsFromOutputs(input,output,target,train_costs); real current_total_cost = train_costs[0]; real current_fit_error = train_costs[1]; real current_class_error = (cost_type!=0)?train_costs[3]:0; train_costs*=sampleweight; train_stats->update(train_costs); // compute output and cost IF WE USED THE CANDIDATE HIDDEN UNIT real candidate_act = dot(input, candidate_unit_weights) + candidate_unit_bias; if ( enable_internal_weights && stage > 0 ) candidate_act += dot( h, candidate_unit_internal_weights ); real candidate_h; if (hard_activation_function) candidate_h = sign(candidate_act); else candidate_h = tanh(candidate_act); candidate_h_vec[0]=candidate_h; // linear_output_with_candidate = linear_output + candidate_unit_output_weight*candidate_h; multiplyAdd(linear_output,candidate_unit_output_weights, candidate_h,linear_output_with_candidate); if (cost_type == 2) // "discrete_log_likelihood" softmax(linear_output_with_candidate,output_with_candidate); else output_with_candidate << linear_output_with_candidate; computeCostsFromOutputs(input,output_with_candidate,target,costs_with_candidate); // computeCostsFromOutputs does not count the cost of the candidate's output weights, so add it: costs_with_candidate[0] += output_weight_decay * sumabs(candidate_unit_output_weights); real candidate_class_error = (cost_type!=0)?costs_with_candidate[3]:0; if ( decay_factor != 0.0 ) learning_rate = initial_learning_rate / ( 1 + n_examples_seen * decay_factor ); // TRAINING OF THE NETWORK // backprop & update regular network parameters // TRAINING OF THE NETWORK if (!boosting) // i.e. continue training the existing hidden units { // ** compute gradient on linear output output_loss_gradient(output, target, output_gradient, sampleweight); // ** bprop through the network & update // bprop on output layer multiplyAcc(output_biases, output_gradient, -learning_rate); if (!incremental_connections){ for ( int i = 0; i < n_outputs; i++ ) multiplyAcc( direct_weights(i), input, output_gradient[i]*(-learning_rate) ); } else { for ( int i = 0; i < n_outputs; i++ ) update_incremental_connections( direct_weights(i), direct_weight_gradients(i), input, output_gradient[i] ); } if (stage>0) { // the method below does: // hidden_gradient[j] = sum_i output_weights[j,i]*output_gradient[i] // output_weights[i,j] -= learning_rate * (output_gradient[i] * h[j] + output_weight_decay * sign(output_weights[i,j])) transposedLayerL1BpropUpdate(hidden_gradient, output_weights, h, output_gradient, learning_rate, output_weight_decay); if ( residual_correlation_gradient ) { if ( n_outputs > 1 ){ for ( int i = 0; i < stage; i++ ) { // calculate output_weight_gradients residual_correlation_output_gradient( output_weight_gradients(i), output_weights(i), output_gradient, h[i], hidden_gradient[i] ); } } else hidden_gradient.fill(output_gradient[0]); } if ( !enable_internal_weights ){ // simple one-hidden-layer topology // bprop through hidden units activation if (hard_activation_function) // Should h_i(x) change of sign? // Consider the loss that would occur if it did, i.e. with output replaced by output - 2*W[.,i]*h_i(x) // Then consider a weighted classification problem // with the appropriate sign and weight = gradient on h_i(x). 
{ for (int i=0;i<int(stage);i++) // loop over hidden units { Vec Wi = output_weights(i); multiplyAdd(output,Wi,-2*h[i],output_with_signchange); real fit_error_with_sign_change = output_loss(output_with_signchange,target); int target_i = int(sign(fit_error_with_sign_change-current_fit_error)*h[i]); real weight_i = fabs(hidden_gradient[i]); // CHECK: when is the sign of hidden_gradient different from (h[i]-target_i)? if (use_hinge_loss_for_hard_activation) hidden_gradient[i] = weight_i * d_hinge_loss(act[i],target_i); else // use cross-entropy hidden_gradient[i] = weight_i * (sigmoid(act[i]) - 2*(target_i+1)); } } else bprop_tanh(h,hidden_gradient,hidden_gradient); // hidden_gradient *= ( 1 - h^2 ) } else { // cascade topology if ( !incremental_connections ){ //if (hard_activation_function) { /*not implemented*/ } else for ( int i = stage-1; i >= 0; i-- ) { // bprop_tanh equivalent, also modifies internal_weights hidden_gradient[i] *= (1 - h[i]*h[i]); for ( int j = 0; j < i; j++ ) { if ( !residual_correlation_gradient ) // back-propagate gradients through internal weights hidden_gradient[j] += internal_weights[i][j] * hidden_gradient[i]; internal_weights[i][j] -= learning_rate * ( hidden_gradient[i] * h[j] ); //+ output_weight_decay * sign(internal_weights[i][j]) ); } } } else { // incremental internal connections for ( int i = stage-1; i >= 0; i-- ) { hidden_gradient[i] *= (1 - h[i]*h[i]); if ( !residual_correlation_gradient ) { for ( int j = 0; j < i; j++ ) // back-propagate gradients through internal connections. hidden_gradient[j] += internal_weights[i][j] * hidden_gradient[i]; } update_incremental_connections( internal_weights[i], internal_weight_gradients[i], h, hidden_gradient[i] ); } //hidden_gradient[0] *= (1 - h[0]*h[0]); // the first unit has no incomming internal connections } } //hidden_gradient *= -learning_rate; hidden_layer_biases -= hidden_gradient * learning_rate; if ( !incremental_connections ) { // bprop through hidden layer and update hidden_weights externalProductAcc(hidden_layer_weights, hidden_gradient * (-learning_rate), input); } else { // incremental_connections for ( int i = 0; i < stage; i++ ){ update_incremental_connections( hidden_layer_weights(i), hidden_layer_weight_gradients(i), input, hidden_gradient[i] ); } } } } //MNT if ( verbosity > 3 ) { cout << "STAGE: " << stage << endl << "input: " << input << endl << "output: " << output << endl << "target: " << target << endl << "train_costs: " << train_costs << endl << "output_gradient: " << output_gradient << endl << "candidate_h: " << candidate_h << endl << "current_average_cost: " << current_average_cost << endl ; if ( stage > 0 ) { cout << "hidden_layer_weights: " << hidden_layer_weights //<< endl << "hidden_layer_biases: " << hidden_layer_biases << endl ; } if ( verbosity > 4 ) { cout << " output_with_candidate: " << output_with_candidate << endl; cout << " target: " << target << endl; cout << " candidate_unit_output_weights_mat(before): " << candidate_unit_output_weights_mat; cout << " candidate_unit_weights (before): " << candidate_unit_weights << endl; cout << " candidate_unit_bias (before): " << candidate_unit_bias << endl; } } // TRAINING OF THE CANDIDATE UNIT // backprop & update candidate hidden unit output_loss_gradient(output_with_candidate, target, output_gradient_with_candidate, sampleweight); // computes candidate_hidden_gradient, and updates candidate_unit_output_weights_mat layerBpropUpdate(candidate_hidden_gradient, candidate_unit_output_weights_mat, candidate_h_vec, 
output_gradient_with_candidate, learning_rate); if ( residual_correlation_gradient ) { residual_correlation_output_gradient( candidate_unit_output_weight_gradients, candidate_unit_output_weights, output_gradient_with_candidate, candidate_h, candidate_hidden_gradient[0] ); } // bprop through candidate hidden unit activation, heuristic method if (hard_activation_function) { multiplyAdd(output_with_candidate,candidate_unit_output_weights,-2*candidate_h,output_with_signchange); real fit_error_with_sign_change = output_loss(output_with_signchange,target); int hidden_class = int(sign(fit_error_with_sign_change-current_fit_error)*candidate_h); real weight_on_loss = fabs(candidate_hidden_gradient[0]); // CHECK: when is the sign of hidden_gradient different from (h[i]-target_i)? if (use_hinge_loss_for_hard_activation) candidate_hidden_gradient[0] = weight_on_loss * d_hinge_loss(candidate_act,hidden_class); else // use cross-entropy candidate_hidden_gradient[0] = weight_on_loss * (sigmoid(candidate_act) - 2*(hidden_class+1)); } else { bprop_tanh(candidate_h_vec,candidate_hidden_gradient,candidate_hidden_gradient); } //candidate_hidden_gradient *= -learning_rate; candidate_unit_bias -= candidate_hidden_gradient[0] * learning_rate; if ( incremental_connections ) { update_incremental_connections( candidate_unit_weights, candidate_unit_weight_gradients, input, candidate_hidden_gradient[0]); if ( enable_internal_weights && stage > 0 ) { // consider weights from older hidden units update_incremental_connections( candidate_unit_internal_weights, candidate_unit_internal_weight_gradients, h, candidate_hidden_gradient[0]); } } else { // train all connections at once multiplyAcc( candidate_unit_weights, input, candidate_hidden_gradient[0] * (-learning_rate) ); if ( enable_internal_weights && stage > 0 ) // consider weights from older hidden units multiplyAcc( candidate_unit_internal_weights, h, candidate_hidden_gradient[0] * (-learning_rate) ); } //MNT if ( verbosity > 4 ) { cout << " candidate_hidden_gradient: " << candidate_hidden_gradient << endl; cout << " candidate_unit_output_weights_mat(after): " << candidate_unit_output_weights_mat; cout << " candidate_unit_weights (after): " << candidate_unit_weights << endl; cout << " candidate_unit_bias (after): " << candidate_unit_bias << endl; } // keep track of average performance with and without candidate hidden unit n_examples_seen++; int n_batches_seen = n_examples_seen / minibatchsize; int t_since_beginning_of_batch = n_examples_seen - n_batches_seen*minibatchsize; if (!online) moving_average_coefficient = 1.0/(1+t_since_beginning_of_batch); next_average_cost = moving_average_coefficient*costs_with_candidate[0] +(1-moving_average_coefficient)*next_average_cost; if (n_examples_seen==1) { current_average_cost = current_total_cost; old_current_average_cost = current_average_cost; old_next_average_cost = next_average_cost; } else { current_average_cost = moving_average_coefficient*current_total_cost +(1-moving_average_coefficient)*current_average_cost; } if (verbosity>1 && cost_type!=0) { current_average_class_error = moving_average_coefficient*current_class_error +(1-moving_average_coefficient)*current_average_class_error; next_average_class_error = moving_average_coefficient*candidate_class_error +(1-moving_average_coefficient)*next_average_class_error; } // consider inserting the candidate hidden unit (at every minibatchsize examples) if (t_since_beginning_of_batch == 0) { old_current_average_cost = current_average_cost; old_next_average_cost = 
next_average_cost; n_examples_training_candidate += minibatchsize; if (verbosity>1) { cout << "At t=" << real(n_examples_seen)/train_set->length() << " epochs, estimated average cost = " << current_average_cost << " (with candidate " << next_average_cost << " )"<< endl; if (verbosity>2) cout << "(current cost = " << current_total_cost << "; and with candidate = " << costs_with_candidate[0] << ")" << endl; if (cost_type!=0) cout << "Estimated classification error = " << current_average_class_error << " (with candidate " << next_average_class_error << " )"<< endl; cout << "learning rate = " << learning_rate << endl; } if ( next_average_cost < current_average_cost && stage < nstages ) { // insert candidate hidden unit stage++; output_weights.resize(stage,n_outputs); hidden_layer_weights.resize(stage,inputsize()); hidden_layer_biases.resize(stage); hidden_gradient.resize(stage); output_weights(stage-1) << candidate_unit_output_weights; hidden_layer_weights(stage-1) << candidate_unit_weights; hidden_layer_biases[stage-1] = candidate_unit_bias; if ( incremental_connections ){ hidden_layer_weight_gradients.resize(stage,inputsize()); hidden_layer_weight_gradients(stage-1) << candidate_unit_weight_gradients; } if ( residual_correlation_gradient && n_outputs > 1 ) { output_weight_gradients.resize(stage,n_outputs); output_weight_gradients(stage-1) << candidate_unit_output_weight_gradients; candidate_unit_output_weight_gradients.fill(0.0); } if ( enable_internal_weights ) { internal_weights.resize(stage); internal_weights[stage-1].resize(stage-1); internal_weights[stage-1] << candidate_unit_internal_weights; //if ( stage > 1 ) //cout << "internal_weights.size(): " << internal_weights.size() << endl; candidate_unit_internal_weights.resize(stage); if ( incremental_connections ){ internal_weight_gradients.resize(stage); internal_weight_gradients[stage-1].resize(stage-1); internal_weight_gradients[stage-1] << candidate_unit_internal_weight_gradients; candidate_unit_internal_weight_gradients.resize(stage); candidate_unit_internal_weights.fill(.0); //candidate_unit_internal_weights.fill(0.01/stage); candidate_unit_internal_weight_gradients.fill(.0); } else { candidate_unit_internal_weights.fill(.0); //candidate_unit_internal_weights.fill(0.01/stage); } } act.resize(stage); h.resize(stage); // initialize a new candidate candidate_unit_output_weights.fill(0.01/stage); //candidate_unit_weights.clear(); //MNT if (!incremental_connections) { for( int i=0; i < candidate_unit_weights.length(); i++ ) candidate_unit_weights[i] = ((real)rand()/RAND_MAX - 0.5)*rand_range; } else candidate_unit_weights.fill(.0); candidate_unit_bias = ((real)rand()/RAND_MAX - 0.5)*rand_range; if (verbosity>1) cout << "Adding hidden unit number " << stage << " after training it for " << n_examples_training_candidate << " examples.\n The average cost is " << "expected to decrease from " << current_average_cost << " to " << next_average_cost << "." << endl; n_examples_training_candidate=0; } else {// should we stop? if (n_examples_training_candidate >= max_n_epochs_to_fail*train_set->length()) { stopping_criterion_not_met = false; // STOP if (verbosity>0) cout << "Stopping at " << stage << " units, after seeing " << n_examples_seen << " examples in " << n_examples_seen/train_set->length() << " epochs." 
<< endl << "The next candidate unit yields an apparent average cost of " << next_average_cost << " instead of the current one of " << current_average_cost << endl; } } if (!online) current_average_cost = 0; } } while (stage<nstages && stopping_criterion_not_met); //++stage; train_stats->finalize(); // finalize statistics for this epoch } }
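The unit-insertion logic inside train() reduces to comparing two exponential moving averages: the average cost of the current network and the average cost the network would have if the candidate hidden unit were already wired in. The sketch below isolates that comparison for the online case (moving_average_coefficient = 1/minibatch_size); the real method additionally handles the batch variant, the first-example initialization, and the max_n_epochs_to_fail stopping test.

#include <cstdio>

// Illustrative sketch of the decision train() makes at each minibatch boundary.
struct CandidateMonitor
{
    double current_average_cost; // moving average cost of the current network
    double next_average_cost;    // moving average cost if the candidate unit were included
    double mac;                  // moving_average_coefficient = 1.0 / minibatch_size

    explicit CandidateMonitor(int minibatch_size)
        : current_average_cost(0), next_average_cost(0), mac(1.0 / minibatch_size) {}

    void update(double current_cost, double cost_with_candidate)
    {
        current_average_cost = mac * current_cost        + (1 - mac) * current_average_cost;
        next_average_cost    = mac * cost_with_candidate + (1 - mac) * next_average_cost;
    }

    bool should_insert_candidate() const
    {
        return next_average_cost < current_average_cost;
    }
};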
void PLearn::IncrementalNNet::update_incremental_connections | ( | Vec | weights, |
Vec | MAgradients, | ||
const Vec & | input, | ||
real | gradient | ||
) | const |
Definition at line 893 of file IncrementalNNet.cc.
References connection_gradient_threshold, connection_removing_threshold, i, learning_rate, moving_average_coefficient, n, and PLearn::TVec< T >::size().
Referenced by train().
{
    int n = weights.size();
    for ( int i = 0; i < n; i++ ) {
        MAgradients[i] = gradient * input[i] * moving_average_coefficient
                         + (1-moving_average_coefficient)*MAgradients[i];
        if ( weights[i] == 0.0 ) {
            if ( fabs(MAgradients[i]) > connection_gradient_threshold ){ // add connection
                //weights[i] = - 5 * learning_rate * MAgradients[i];
                weights[i] -= gradient * input[i] * learning_rate;
            }
        } else {
            if ( fabs( weights[i] ) + fabs( MAgradients[i] ) < connection_removing_threshold )
                weights[i] = 0.0; // remove connection
            else
                weights[i] -= gradient * input[i] * learning_rate; // update connection
        }
    }
}
StaticInitializer PLearn::IncrementalNNet::_static_initializer_ [static] |
Reimplemented from PLearn::PLearner.
Definition at line 182 of file IncrementalNNet.h.
Vec PLearn::IncrementalNNet::act |
Definition at line 137 of file IncrementalNNet.h.
Referenced by build_(), computeOutput(), makeDeepCopyFromShallowCopy(), and train().
bool PLearn::IncrementalNNet::boosting |
Definition at line 108 of file IncrementalNNet.h.
Referenced by declareOptions(), and train().
real PLearn::IncrementalNNet::candidate_unit_bias [protected] |
Definition at line 78 of file IncrementalNNet.h.
Referenced by declareOptions(), forget(), and train().
Vec PLearn::IncrementalNNet::candidate_unit_internal_weight_gradients [protected] |
Definition at line 82 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
Vec PLearn::IncrementalNNet::candidate_unit_internal_weights [protected] |
Definition at line 81 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
Vec PLearn::IncrementalNNet::candidate_unit_output_weight_gradients [protected] |
Definition at line 80 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), makeDeepCopyFromShallowCopy(), and train().
Vec PLearn::IncrementalNNet::candidate_unit_output_weights [protected] |
Definition at line 79 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), makeDeepCopyFromShallowCopy(), and train().
Vec PLearn::IncrementalNNet::candidate_unit_weight_gradients [protected] |
Definition at line 77 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), makeDeepCopyFromShallowCopy(), and train().
Vec PLearn::IncrementalNNet::candidate_unit_weights [protected] |
Definition at line 76 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), makeDeepCopyFromShallowCopy(), and train().
real PLearn::IncrementalNNet::connection_gradient_threshold |
Definition at line 128 of file IncrementalNNet.h.
Referenced by declareOptions(), and update_incremental_connections().
real PLearn::IncrementalNNet::connection_removing_threshold |
Definition at line 129 of file IncrementalNNet.h.
Referenced by declareOptions(), and update_incremental_connections().
int PLearn::IncrementalNNet::cost_type |
Definition at line 139 of file IncrementalNNet.h.
Referenced by build_(), computeCostsFromOutputs(), computeOutput(), output_loss(), output_loss_gradient(), and train().
real PLearn::IncrementalNNet::current_average_cost [protected] |
Definition at line 84 of file IncrementalNNet.h.
Referenced by declareOptions(), forget(), and train().
int PLearn::IncrementalNNet::current_example [protected] |
Definition at line 87 of file IncrementalNNet.h.
real PLearn::IncrementalNNet::decay_factor |
Definition at line 120 of file IncrementalNNet.h.
Referenced by declareOptions(), and train().
Mat PLearn::IncrementalNNet::direct_weight_gradients [protected] |
Definition at line 67 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), makeDeepCopyFromShallowCopy(), and train().
Mat PLearn::IncrementalNNet::direct_weights [protected] |
Definition at line 66 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), makeDeepCopyFromShallowCopy(), and train().
bool PLearn::IncrementalNNet::enable_internal_weights |
Definition at line 125 of file IncrementalNNet.h.
Referenced by build_(), computeOutput(), declareOptions(), and train().
Vec PLearn::IncrementalNNet::h |
Definition at line 138 of file IncrementalNNet.h.
Referenced by build_(), computeOutput(), makeDeepCopyFromShallowCopy(), and train().
bool PLearn::IncrementalNNet::hard_activation_function |
Definition at line 117 of file IncrementalNNet.h.
Referenced by computeOutput(), declareOptions(), and train().
Vec PLearn::IncrementalNNet::hidden_layer_biases [protected] |
Definition at line 73 of file IncrementalNNet.h.
Referenced by build_(), computeOutput(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
Mat PLearn::IncrementalNNet::hidden_layer_weight_gradients [protected] |
Definition at line 72 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
Mat PLearn::IncrementalNNet::hidden_layer_weights [protected] |
Definition at line 71 of file IncrementalNNet.h.
Referenced by build_(), computeOutput(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
bool PLearn::IncrementalNNet::incremental_connections |
Definition at line 126 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), and train().
real PLearn::IncrementalNNet::initial_learning_rate |
Definition at line 119 of file IncrementalNNet.h.
Referenced by declareOptions(), and train().
TVec<Vec> PLearn::IncrementalNNet::internal_weight_gradients [protected] |
Definition at line 75 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
TVec<Vec> PLearn::IncrementalNNet::internal_weights [protected] |
Definition at line 74 of file IncrementalNNet.h.
Referenced by build_(), computeOutput(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
real PLearn::IncrementalNNet::learning_rate [protected] |
Definition at line 90 of file IncrementalNNet.h.
Referenced by train(), and update_incremental_connections().
Vec PLearn::IncrementalNNet::linear_output |
Definition at line 136 of file IncrementalNNet.h.
Referenced by build_(), computeOutput(), makeDeepCopyFromShallowCopy(), and train().
real PLearn::IncrementalNNet::max_n_epochs_to_fail |
Definition at line 121 of file IncrementalNNet.h.
Referenced by declareOptions(), and train().
int PLearn::IncrementalNNet::minibatch_size |
Definition at line 106 of file IncrementalNNet.h.
Referenced by declareOptions(), and train().
bool PLearn::IncrementalNNet::minimize_local_cost |
Definition at line 111 of file IncrementalNNet.h.
Referenced by declareOptions().
real PLearn::IncrementalNNet::moving_average_coefficient [protected] |
Definition at line 89 of file IncrementalNNet.h.
Referenced by residual_correlation_output_gradient(), train(), and update_incremental_connections().
int PLearn::IncrementalNNet::n_examples_seen [protected] |
Definition at line 83 of file IncrementalNNet.h.
Referenced by declareOptions(), forget(), and train().
int PLearn::IncrementalNNet::n_examples_training_candidate [protected] |
Definition at line 86 of file IncrementalNNet.h.
Referenced by declareOptions(), and train().
int PLearn::IncrementalNNet::n_outputs |
Definition at line 99 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), output_loss_gradient(), outputsize(), and train().
real PLearn::IncrementalNNet::next_average_cost [protected] |
Definition at line 85 of file IncrementalNNet.h.
Referenced by declareOptions(), forget(), and train().
bool PLearn::IncrementalNNet::online |
Definition at line 101 of file IncrementalNNet.h.
Referenced by declareOptions(), and train().
Vec PLearn::IncrementalNNet::output_biases [protected] |
Definition at line 70 of file IncrementalNNet.h.
Referenced by build_(), computeOutput(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
string PLearn::IncrementalNNet::output_cost_type |
Definition at line 107 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), getTestCostNames(), and train().
real PLearn::IncrementalNNet::output_weight_decay |
Definition at line 100 of file IncrementalNNet.h.
Referenced by computeCostsFromOutputs(), declareOptions(), and train().
Mat PLearn::IncrementalNNet::output_weight_gradients [protected] |
Definition at line 69 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
Mat PLearn::IncrementalNNet::output_weights [protected] |
Definition at line 68 of file IncrementalNNet.h.
Referenced by build_(), computeCostsFromOutputs(), computeOutput(), declareOptions(), makeDeepCopyFromShallowCopy(), and train().
real PLearn::IncrementalNNet::rand_range |
Definition at line 124 of file IncrementalNNet.h.
Referenced by declareOptions(), forget(), and train().
bool PLearn::IncrementalNNet::residual_correlation_gradient |
Definition at line 131 of file IncrementalNNet.h.
Referenced by build_(), declareOptions(), forget(), and train().
bool PLearn::IncrementalNNet::use_hinge_loss_for_hard_activation |
Definition at line 118 of file IncrementalNNet.h.
Referenced by declareOptions(), and train().