PLearn 0.1
RBMConnection which uses the transpose of some other RBMMatrixConnection's weights. More...
#include <RBMMatrixTransposeConnection.h>
Public Member Functions

 RBMMatrixTransposeConnection (PP< RBMMatrixConnection > the_rbm_matrix_connection=0, real the_learning_rate=0, bool call_build_=false)
    Default constructor.
virtual void accumulatePosStats (const Vec &down_values, const Vec &up_values)
    Accumulates positive phase statistics to *_pos_stats.
virtual void accumulatePosStats (const Mat &down_values, const Mat &up_values)
virtual void accumulateNegStats (const Vec &down_values, const Vec &up_values)
    Accumulates negative phase statistics to *_neg_stats.
virtual void accumulateNegStats (const Mat &down_values, const Mat &up_values)
virtual void update ()
    Updates parameters according to contrastive divergence gradient.
virtual void update (const Vec &pos_down_values, const Vec &pos_up_values, const Vec &neg_down_values, const Vec &neg_up_values)
    Updates parameters according to contrastive divergence gradient, not using the statistics but the explicit values passed.
virtual void clearStats ()
    Clear all information accumulated during stats.
virtual void computeProduct (int start, int length, const Vec &activations, bool accumulate=false) const
    Computes the vector of activations of "length" units, starting from "start", and stores (or adds) them into "activations".
virtual void computeProducts (int start, int length, Mat &activations, bool accumulate=false) const
    Same as 'computeProduct' but for mini-batches.
virtual void bpropUpdate (const Vec &input, const Vec &output, Vec &input_gradient, const Vec &output_gradient, bool accumulate=false)
    Adapt based on the output gradient: this method should only be called just after a corresponding fprop; it should be called with the same arguments as fprop for the first two arguments (and output should not have been modified since then).
virtual void bpropUpdate (const Mat &inputs, const Mat &outputs, Mat &input_gradients, const Mat &output_gradients, bool accumulate=false)
    SOON TO BE DEPRECATED, USE bpropAccUpdate(const TVec<Mat*>& ports_value, const TVec<Mat*>& ports_gradient)
virtual void forget ()
    Reset the parameters to the state they would be BEFORE starting training.
virtual int nParameters () const
    Return the number of parameters.
virtual Vec makeParametersPointHere (const Vec &global_parameters)
    Make the parameters data be sub-vectors of the given global_parameters.
virtual string classname () const
virtual OptionList & getOptionList () const
virtual OptionMap & getOptionMap () const
virtual RemoteMethodMap & getRemoteMethodMap () const
virtual RBMMatrixTransposeConnection * deepCopy (CopiesMap &copies) const
virtual void build ()
    Post-constructor.
virtual void makeDeepCopyFromShallowCopy (CopiesMap &copies)
    Transforms a shallow copy into a deep copy.

Static Public Member Functions

static string _classname_ ()
static OptionList & _getOptionList_ ()
static RemoteMethodMap & _getRemoteMethodMap_ ()
static Object * _new_instance_for_typemap_ ()
static bool _isa_ (const Object *o)
static void _static_initialize_ ()
static const PPath & declaringFile ()

Public Attributes

Mat weights
    Matrix containing unit-to-unit weights.
PP< RBMMatrixConnection > rbm_matrix_connection
    RBMMatrixConnection from which the weights are taken.
bool learn_scale
    Indication that the scale of the weight matrix should be learned.
real scale
    Learned scale for weight matrix.
Mat weights_pos_stats
    Accumulates positive contribution to the weights' gradient.
Mat weights_neg_stats
    Accumulates negative contribution to the weights' gradient.
Mat weights_inc
    Used if momentum != 0.

Static Public Attributes

static StaticInitializer _static_initializer_

Static Protected Member Functions

static void declareOptions (OptionList &ol)
    Declares the class options.

Private Types

typedef RBMConnection inherited

Private Member Functions

void build_ ()
    This does the actual building.
RBMConnection which uses the transpose of some other RBMMatrixConnection's weights.
Definition at line 53 of file RBMMatrixTransposeConnection.h.
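A minimal construction sketch (the include paths, sizes and learning rate below are illustrative assumptions; only the constructor and the options documented on this page are taken from the source):

    #include <plearn_learners/online/RBMMatrixConnection.h>
    #include <plearn_learners/online/RBMMatrixTransposeConnection.h>

    using namespace PLearn;

    // An ordinary connection owning the weight matrix (sizes are illustrative).
    PP<RBMMatrixConnection> conn = new RBMMatrixConnection();
    conn->down_size = 784;   // e.g. visible units
    conn->up_size   = 500;   // e.g. hidden units
    conn->build();

    // A transposed view on the same weights: build_() sets down_size/up_size
    // from conn->up_size/conn->down_size and shares (does not copy) the matrix.
    PP<RBMMatrixTransposeConnection> tconn =
        new RBMMatrixTransposeConnection(conn, 0.01 /* learning rate */,
                                         true   /* call_build_ */);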
typedef RBMConnection PLearn::RBMMatrixTransposeConnection::inherited [private]
Reimplemented from PLearn::RBMConnection.
Definition at line 55 of file RBMMatrixTransposeConnection.h.
PLearn::RBMMatrixTransposeConnection::RBMMatrixTransposeConnection ( PP< RBMMatrixConnection > the_rbm_matrix_connection = 0, real the_learning_rate = 0, bool call_build_ = false )
Default constructor.
Definition at line 51 of file RBMMatrixTransposeConnection.cc.
References build_().
    : inherited(the_learning_rate, call_build_),
      rbm_matrix_connection(the_rbm_matrix_connection),
      learn_scale( false ),
      scale( 1.0 )
{
    if (call_build_)
        build_();
}
string PLearn::RBMMatrixTransposeConnection::_classname_ ( ) [static]
Reimplemented from PLearn::RBMConnection.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
OptionList & PLearn::RBMMatrixTransposeConnection::_getOptionList_ ( ) [static]
Reimplemented from PLearn::RBMConnection.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
RemoteMethodMap & PLearn::RBMMatrixTransposeConnection::_getRemoteMethodMap_ ( ) [static]
Reimplemented from PLearn::RBMConnection.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
bool PLearn::RBMMatrixTransposeConnection::_isa_ ( const Object * o ) [static]
Reimplemented from PLearn::RBMConnection.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
Object * PLearn::RBMMatrixTransposeConnection::_new_instance_for_typemap_ ( ) [static]
Reimplemented from PLearn::Object.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
void PLearn::RBMMatrixTransposeConnection::_static_initialize_ ( ) [static]
Reimplemented from PLearn::RBMConnection.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
virtual void PLearn::RBMMatrixTransposeConnection::accumulateNegStats ( const Mat & down_values, const Mat & up_values ) [inline, virtual]
Implements PLearn::RBMConnection.
Definition at line 110 of file RBMMatrixTransposeConnection.h.
References PLASSERT_MSG.
{ PLASSERT_MSG( false, "Not implemented" ); }
void PLearn::RBMMatrixTransposeConnection::accumulateNegStats ( const Vec & down_values, const Vec & up_values ) [virtual]
Accumulates negative phase statistics to *_neg_stats.
Implements PLearn::RBMConnection.
Definition at line 149 of file RBMMatrixTransposeConnection.cc.
References PLearn::externalProductAcc(), PLearn::RBMConnection::neg_count, and weights_neg_stats.
{
    // weights_neg_stats += down_values * up_values'
    externalProductAcc( weights_neg_stats, down_values, up_values );
    neg_count++;
}
virtual void PLearn::RBMMatrixTransposeConnection::accumulatePosStats ( const Mat & down_values, const Mat & up_values ) [inline, virtual]
Implements PLearn::RBMConnection.
Definition at line 100 of file RBMMatrixTransposeConnection.h.
References PLASSERT_MSG.
{ PLASSERT_MSG( false, "Not implemented" ); }
void PLearn::RBMMatrixTransposeConnection::accumulatePosStats ( const Vec & down_values, const Vec & up_values ) [virtual]
Accumulates positive phase statistics to *_pos_stats.
Implements PLearn::RBMConnection.
Definition at line 140 of file RBMMatrixTransposeConnection.cc.
References PLearn::externalProductAcc(), PLearn::RBMConnection::pos_count, and weights_pos_stats.
{
    // weights_pos_stats += down_values * up_values'
    externalProductAcc( weights_pos_stats, down_values, up_values );
    pos_count++;
}
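As a sketch of what the two accumulators hold (the symbols below are notation introduced here, not names from the code): after accumulating N positive pairs (v^(n), h^(n)) and M negative pairs (v~^(m), h~^(m)),

    weights_pos_stats = sum_{n=1..N} v^(n) (h^(n))^T,     pos_count = N
    weights_neg_stats = sum_{m=1..M} v~^(m) (h~^(m))^T,   neg_count = M

update() later combines these averages into the contrastive divergence step.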
void PLearn::RBMMatrixTransposeConnection::bpropUpdate ( const Vec & input, const Vec & output, Vec & input_gradient, const Vec & output_gradient, bool accumulate = false ) [virtual]
Adapt based on the output gradient: this method should only be called just after a corresponding fprop; it should be called with the same arguments as fprop for the first two arguments (and output should not have been modified since then).
this version allows to obtain the input gradient as well
Since sub-classes are supposed to learn ONLINE, the object is 'ready-to-be-used' just after any bpropUpdate. N.B. A DEFAULT IMPLEMENTATION IS PROVIDED IN THE SUPER-CLASS, WHICH JUST CALLS bpropUpdate(input, output, input_gradient, output_gradient) AND IGNORES THE INPUT GRADIENT. N.B. THE DEFAULT IMPLEMENTATION IN THE SUPER-CLASS JUST RAISES A PLERROR.
Reimplemented from PLearn::OnlineLearningModule.
Definition at line 356 of file RBMMatrixTransposeConnection.cc.
References PLearn::TVec< T >::data(), PLearn::RBMConnection::down_size, PLearn::externalProductScaleAcc(), i, in, j, learn_scale, PLearn::RBMConnection::learning_rate, PLearn::TMat< T >::length(), PLASSERT, PLASSERT_MSG, PLearn::product(), PLearn::productAcc(), rbm_matrix_connection, PLearn::TVec< T >::resize(), scale, PLearn::TVec< T >::size(), PLearn::RBMConnection::up_size, weights, and PLearn::TMat< T >::width().
{
    PLASSERT( input.size() == down_size );
    PLASSERT( output.size() == up_size );
    PLASSERT( output_gradient.size() == up_size );
    PLASSERT_MSG( rbm_matrix_connection,
        "RBMMatrixTransposeConnection must be given an rbm_matrix_connection.\n");

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == down_size,
                      "Cannot resize input_gradient AND accumulate into it" );

        // input_gradient += weights' * output_gradient
        productAcc( input_gradient, weights, output_gradient );
    }
    else
    {
        input_gradient.resize( down_size );

        // input_gradient = weights' * output_gradient
        product( input_gradient, weights, output_gradient );
    }

    // weights -= learning_rate * output_gradient * input'
    externalProductScaleAcc( weights, input, output_gradient, -learning_rate );

    if( learn_scale )
    {
        real* in = input.data();
        real* out_g;
        real* wj;
        for( int j=0; j<weights.width(); j++)
        {
            out_g = output_gradient.data();
            wj = weights[j];
            for( int i=0; i<weights.length(); i++ )
                scale -= learning_rate * out_g[i] * wj[i] * in[j];
        }
    }
}
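In matrix notation (symbols introduced here: W is the shared weights matrix as seen by this class, of size down_size x up_size, and eta is learning_rate), the body above computes

    input_gradient = W * output_gradient            (added into input_gradient when accumulate is true)
    W <- W - eta * input * output_gradient^T

followed, when learn_scale is true, by a gradient step on the scalar scale.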
void PLearn::RBMMatrixTransposeConnection::bpropUpdate ( const Mat & inputs, const Mat & outputs, Mat & input_gradients, const Mat & output_gradients, bool accumulate = false ) [virtual]
SOON TO BE DEPRECATED, USE bpropAccUpdate(const TVec<Mat*>& ports_value, const TVec<Mat*>& ports_gradient)
Reimplemented from PLearn::OnlineLearningModule.
Definition at line 400 of file RBMMatrixTransposeConnection.cc.
References PLearn::RBMConnection::down_size, i, in, j, learn_scale, PLearn::RBMConnection::learning_rate, PLearn::TMat< T >::length(), PLASSERT, PLASSERT_MSG, PLearn::productTranspose(), PLearn::productTransposeAcc(), rbm_matrix_connection, PLearn::TMat< T >::resize(), scale, PLearn::transposeProductScaleAcc(), PLearn::RBMConnection::up_size, weights, and PLearn::TMat< T >::width().
{
    PLASSERT( inputs.width() == down_size );
    PLASSERT( outputs.width() == up_size );
    PLASSERT( output_gradients.width() == up_size );
    PLASSERT_MSG( rbm_matrix_connection,
        "RBMMatrixTransposeConnection must be given an rbm_matrix_connection.\n");

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == down_size &&
                      input_gradients.length() == inputs.length(),
                      "Cannot resize input_gradients and accumulate into it" );

        // input_gradients += output_gradient * weights
        productTransposeAcc(input_gradients, output_gradients, weights);
    }
    else
    {
        input_gradients.resize(inputs.length(), down_size);
        // input_gradients = output_gradient * weights
        productTranspose(input_gradients, output_gradients, weights);
    }

    // weights -= learning_rate/n * output_gradients' * inputs
    transposeProductScaleAcc(weights, inputs, output_gradients,
                             -learning_rate / inputs.length(), real(1));

    if( learn_scale )
    {
        for( int t=0; t<inputs.length(); t++)
        {
            real* in = inputs[t];
            real* out_g;
            real* wj;
            for( int j=0; j<weights.width(); j++)
            {
                out_g = output_gradients[t];
                wj = weights[j];
                for( int i=0; i<weights.length(); i++ )
                    scale -= learning_rate * out_g[i] * wj[i] * in[j];
            }
        }
    }
}
void PLearn::RBMMatrixTransposeConnection::build ( ) [virtual]
Post-constructor.
The normal implementation should call simply inherited::build(), then this class's build_(). This method should be callable again at later times, after modifying some option fields to change the "architecture" of the object.
Reimplemented from PLearn::RBMConnection.
Definition at line 122 of file RBMMatrixTransposeConnection.cc.
References PLearn::RBMConnection::build(), and build_().
Referenced by PLearn::DeepBeliefNet::train().
{
    inherited::build();
    build_();
}
void PLearn::RBMMatrixTransposeConnection::build_ ( ) [private]
This does the actual building.
Reimplemented from PLearn::RBMConnection.
Definition at line 93 of file RBMMatrixTransposeConnection.cc.
References clearStats(), PLearn::RBMConnection::down_size, PLearn::OnlineLearningModule::input_size, PLearn::RBMConnection::momentum, PLearn::OnlineLearningModule::output_size, PLearn::OnlineLearningModule::random_gen, rbm_matrix_connection, PLearn::TMat< T >::resize(), PLearn::RBMConnection::up_size, weights, weights_inc, weights_neg_stats, and weights_pos_stats.
Referenced by build(), and RBMMatrixTransposeConnection().
{
    if( !rbm_matrix_connection )
        return;

    // If we have a random_gen and rbm_matrix_connection does not, share it
    if( random_gen && !(rbm_matrix_connection->random_gen) )
    {
        rbm_matrix_connection->random_gen = random_gen;
        rbm_matrix_connection->forget();
    }
    weights = rbm_matrix_connection->weights;
    down_size = rbm_matrix_connection->up_size;
    up_size = rbm_matrix_connection->down_size;

    // For compatibility with OnlineLearningModule inherited functions
    input_size = down_size;
    output_size = up_size;

    weights_pos_stats.resize( down_size, up_size );
    weights_neg_stats.resize( down_size, up_size );

    if( momentum != 0. )
        weights_inc.resize( down_size, up_size );

    clearStats();
}
string PLearn::RBMMatrixTransposeConnection::classname ( ) const [virtual]
Reimplemented from PLearn::Object.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
void PLearn::RBMMatrixTransposeConnection::clearStats ( ) [virtual]
Clear all information accumulated during stats.
Implements PLearn::RBMConnection.
Definition at line 270 of file RBMMatrixTransposeConnection.cc.
References PLearn::TMat< T >::clear(), PLearn::RBMConnection::neg_count, PLearn::RBMConnection::pos_count, weights_neg_stats, and weights_pos_stats.
Referenced by build_(), forget(), and update().
{
    weights_pos_stats.clear();
    weights_neg_stats.clear();

    pos_count = 0;
    neg_count = 0;
}
void PLearn::RBMMatrixTransposeConnection::computeProduct ( int start, int length, const Vec & activations, bool accumulate = false ) const [virtual]
Computes the vector of activations of "length" units, starting from "start", and stores (or adds) them into "activations".
"start" indexes an up unit if "going_up", else a down unit.
Implements PLearn::RBMConnection.
Definition at line 279 of file RBMMatrixTransposeConnection.cc.
References PLearn::RBMConnection::down_size, PLearn::RBMConnection::going_up, PLearn::RBMConnection::input_vec, learn_scale, PLearn::TVec< T >::length(), PLASSERT, PLASSERT_MSG, PLearn::product(), PLearn::productAcc(), rbm_matrix_connection, scale, PLearn::TMat< T >::subMatColumns(), PLearn::TMat< T >::subMatRows(), PLearn::transposeProduct(), PLearn::transposeProductAcc(), PLearn::RBMConnection::up_size, and weights.
{
    PLASSERT( activations.length() == length );
    PLASSERT_MSG( rbm_matrix_connection,
        "RBMMatrixTransposeConnection must be given an rbm_matrix_connection.\n");
    if( going_up )
    {
        PLASSERT( start+length <= up_size );
        // activations[i-start] += sum_j weights(i,j) input_vec[j]

        if( accumulate )
            transposeProductAcc( activations,
                                 weights.subMatColumns(start,length),
                                 input_vec );
        else
            transposeProduct( activations,
                              weights.subMatColumns(start,length),
                              input_vec );
    }
    else
    {
        PLASSERT( start+length <= down_size );
        // activations[i-start] += sum_j weights(j,i) input_vec[j]

        if( accumulate )
            productAcc( activations,
                        weights.subMatRows(start,length),
                        input_vec );
        else
            product( activations,
                     weights.subMatRows(start,length),
                     input_vec );
    }

    if( learn_scale )
        activations *= scale;
}
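Summarized with the same notation as above (W = weights, of size down_size x up_size; s = scale when learn_scale is true, 1 otherwise), and writing v for input_vec:

    going_up:   activations = s * ( W[:, start..start+length] )^T * v
    otherwise:  activations = s * W[start..start+length, :] * v

i.e. an up pass multiplies by the transpose of the shared matrix, a down pass by the matrix itself.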
void PLearn::RBMMatrixTransposeConnection::computeProducts ( int start, int length, Mat & activations, bool accumulate = false ) const [virtual]
Same as 'computeProduct' but for mini-batches.
Implements PLearn::RBMConnection.
Definition at line 317 of file RBMMatrixTransposeConnection.cc.
References PLearn::RBMConnection::down_size, PLearn::RBMConnection::going_up, PLearn::RBMConnection::inputs_mat, learn_scale, PLearn::TMat< T >::length(), PLASSERT, PLASSERT_MSG, PLearn::product(), PLearn::productAcc(), PLearn::productTranspose(), PLearn::productTransposeAcc(), rbm_matrix_connection, PLearn::TMat< T >::resize(), scale, PLearn::TMat< T >::subMatColumns(), PLearn::TMat< T >::subMatRows(), PLearn::RBMConnection::up_size, and weights.
{
    PLASSERT_MSG( rbm_matrix_connection,
        "RBMMatrixTransposeConnection must be given an rbm_matrix_connection.\n");
    activations.resize(inputs_mat.length(), length);
    if( going_up )
    {
        PLASSERT( start+length <= up_size );
        // activations(k, i-start) += sum_j weights(i,j) inputs_mat(k, j)

        if( accumulate )
            productAcc(activations,
                       inputs_mat,
                       weights.subMatColumns(start,length));
        else
            product(activations,
                    inputs_mat,
                    weights.subMatColumns(start,length));
    }
    else
    {
        PLASSERT( start+length <= down_size );
        // activations(k, i-start) += sum_j weights(j,i) inputs_mat(k, j)

        if( accumulate )
            productTransposeAcc(activations,
                                inputs_mat,
                                weights.subMatRows(start,length) );
        else
            productTranspose(activations,
                             inputs_mat,
                             weights.subMatRows(start,length) );
    }

    if( learn_scale )
        activations *= scale;
}
void PLearn::RBMMatrixTransposeConnection::declareOptions ( OptionList & ol ) [static, protected]
Declares the class options.
Reimplemented from PLearn::RBMConnection.
Definition at line 64 of file RBMMatrixTransposeConnection.cc.
References PLearn::OptionBase::buildoption, PLearn::declareOption(), PLearn::RBMConnection::declareOptions(), PLearn::RBMConnection::down_size, learn_scale, PLearn::OptionBase::learntoption, rbm_matrix_connection, PLearn::redeclareOption(), scale, and PLearn::RBMConnection::up_size.
{
    declareOption(ol, "rbm_matrix_connection",
                  &RBMMatrixTransposeConnection::rbm_matrix_connection,
                  OptionBase::buildoption,
                  "RBMMatrixConnection from which the weights are taken");

    declareOption(ol, "learn_scale",
                  &RBMMatrixTransposeConnection::learn_scale,
                  OptionBase::buildoption,
                  "Indication that the scale of the weight matrix should be "
                  "learned.\n");

    declareOption(ol, "scale",
                  &RBMMatrixTransposeConnection::scale,
                  OptionBase::learntoption,
                  "Learned scale for weight matrix.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    redeclareOption(ol, "up_size", &RBMConnection::up_size,
                    OptionBase::learntoption,
                    "Is set to rbm_matrix_connection->down_size.");

    redeclareOption(ol, "down_size", &RBMConnection::down_size,
                    OptionBase::learntoption,
                    "Is set to rbm_matrix_connection->up_size.");
}
static const PPath & PLearn::RBMMatrixTransposeConnection::declaringFile ( ) [inline, static]
Reimplemented from PLearn::RBMConnection.
Definition at line 190 of file RBMMatrixTransposeConnection.h.
RBMMatrixTransposeConnection * PLearn::RBMMatrixTransposeConnection::deepCopy ( CopiesMap & copies ) const [virtual]
Reimplemented from PLearn::RBMConnection.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
void PLearn::RBMMatrixTransposeConnection::forget ( ) [virtual]
reset the parameters to the state they would be BEFORE starting training.
Note that this method is necessarily called from build().
Implements PLearn::OnlineLearningModule.
Definition at line 451 of file RBMMatrixTransposeConnection.cc.
References clearStats(), learn_scale, PLASSERT_MSG, PLWARNING, PLearn::OnlineLearningModule::random_gen, rbm_matrix_connection, and scale.
{
    PLASSERT_MSG( rbm_matrix_connection,
        "RBMMatrixTransposeConnection must be given an rbm_matrix_connection.\n");
    clearStats();
    if( !random_gen )
    {
        PLWARNING("RBMMatrixTransposeConnection: cannot forget() without"
                  " random_gen");
        return;
    }
    if( !(rbm_matrix_connection->random_gen) )
        rbm_matrix_connection->random_gen = random_gen;
    rbm_matrix_connection->forget();
    if( learn_scale )
        scale = 1;
}
OptionList & PLearn::RBMMatrixTransposeConnection::getOptionList ( ) const [virtual]
Reimplemented from PLearn::Object.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
OptionMap & PLearn::RBMMatrixTransposeConnection::getOptionMap ( ) const [virtual]
Reimplemented from PLearn::Object.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
RemoteMethodMap & PLearn::RBMMatrixTransposeConnection::getRemoteMethodMap ( ) const [virtual]
Reimplemented from PLearn::Object.
Definition at line 49 of file RBMMatrixTransposeConnection.cc.
void PLearn::RBMMatrixTransposeConnection::makeDeepCopyFromShallowCopy ( CopiesMap & copies ) [virtual]
Transforms a shallow copy into a deep copy.
Reimplemented from PLearn::RBMConnection.
Definition at line 129 of file RBMMatrixTransposeConnection.cc.
References PLearn::deepCopyField(), PLearn::RBMConnection::makeDeepCopyFromShallowCopy(), rbm_matrix_connection, weights, weights_inc, weights_neg_stats, and weights_pos_stats.
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(weights, copies);
    deepCopyField(rbm_matrix_connection, copies);
    deepCopyField(weights_pos_stats, copies);
    deepCopyField(weights_neg_stats, copies);
    deepCopyField(weights_inc, copies);
}
Vec PLearn::RBMMatrixTransposeConnection::makeParametersPointHere ( const Vec & global_parameters ) [virtual]
Make the parameters data be sub-vectors of the given global_parameters.
The argument should have size >= nParameters(). The result is a Vec that starts just after this object's parameters end, i.e. result = global_parameters.subVec(nParameters(), global_parameters.size()-nParameters()). This makes it easy to chain calls of this method on multiple RBMParameters.
Implements PLearn::RBMConnection.
Definition at line 490 of file RBMMatrixTransposeConnection.cc.
References PLASSERT_MSG, rbm_matrix_connection, and weights.
{
    PLASSERT_MSG( rbm_matrix_connection,
        "RBMMatrixTransposeConnection must be given an rbm_matrix_connection.\n");
    Vec ret = rbm_matrix_connection->makeParametersPointHere(global_parameters);
    weights = rbm_matrix_connection->weights;
    return ret;
}
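A hedged chaining sketch (c1 and c2 are hypothetical RBMConnection objects; only the contract stated above is assumed):

    // Pack the parameters of two connections into a single flat vector.
    Vec global( c1->nParameters() + c2->nParameters() );
    Vec remaining = c1->makeParametersPointHere( global );
    remaining = c2->makeParametersPointHere( remaining );
    // 'remaining' is now empty; each connection's weights are views into 'global'.

For RBMMatrixTransposeConnection the call simply forwards to rbm_matrix_connection, so the underlying connection's parameters should only be pointed into a global vector once.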
int PLearn::RBMMatrixTransposeConnection::nParameters ( ) const [virtual]
Return the number of parameters.
Implements PLearn::RBMConnection.
Definition at line 480 of file RBMMatrixTransposeConnection.cc.
References PLearn::TMat< T >::size(), and weights.
{ return weights.size(); }
void PLearn::RBMMatrixTransposeConnection::update ( ) [virtual]
Updates parameters according to contrastive divergence gradient.
Implements PLearn::RBMConnection.
Definition at line 158 of file RBMMatrixTransposeConnection.cc.
References clearStats(), PLearn::TMat< T >::data(), i, j, learn_scale, PLearn::RBMConnection::learning_rate, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLearn::RBMConnection::momentum, PLearn::RBMConnection::neg_count, PLERROR, PLearn::RBMConnection::pos_count, PLearn::TMat< T >::resize(), w, weights, weights_inc, weights_neg_stats, weights_pos_stats, and PLearn::TMat< T >::width().
{
    if( learn_scale )
        PLERROR("In RBMMatrixTransposeConnection::update(): not implemented "
                "for learned scale");

    // updates parameters
    // weights -= learning_rate * (weights_pos_stats/pos_count
    //                             - weights_neg_stats/neg_count)
    real pos_factor = -learning_rate / pos_count;
    real neg_factor = learning_rate / neg_count;

    int l = weights.length();
    int w = weights.width();

    real* w_i = weights.data();
    real* wps_i = weights_pos_stats.data();
    real* wns_i = weights_neg_stats.data();
    int w_mod = weights.mod();
    int wps_mod = weights_pos_stats.mod();
    int wns_mod = weights_neg_stats.mod();

    if( momentum == 0. )
    {
        // no need to use weights_inc
        for( int i=0 ; i<l ; i++, w_i+=w_mod, wps_i+=wps_mod, wns_i+=wns_mod )
            for( int j=0 ; j<w ; j++ )
                w_i[j] += pos_factor * wps_i[j] + neg_factor * wns_i[j];
    }
    else
    {
        // ensure that weights_inc has the right size
        weights_inc.resize( l, w );

        // The update rule becomes:
        // weights_inc = momentum * weights_inc
        //               - learning_rate * (weights_pos_stats/pos_count
        //                                  - weights_neg_stats/neg_count);
        // weights += weights_inc;
        real* winc_i = weights_inc.data();
        int winc_mod = weights_inc.mod();
        for( int i=0 ; i<l ; i++, w_i += w_mod, wps_i += wps_mod,
                             wns_i += wns_mod, winc_i += winc_mod )
            for( int j=0 ; j<w ; j++ )
            {
                winc_i[j] = momentum * winc_i[j]
                    + pos_factor * wps_i[j] + neg_factor * wns_i[j];
                w_i[j] += winc_i[j];
            }
    }

    clearStats();
}
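The step implemented above, in matrix form (notation introduced here: eta = learning_rate, mu = momentum, W+ = weights_pos_stats, W- = weights_neg_stats, dW = weights_inc):

    mu = 0:    W  <- W - eta * ( W+ / pos_count - W- / neg_count )
    mu != 0:   dW <- mu * dW - eta * ( W+ / pos_count - W- / neg_count ),   W <- W + dW

After the update, clearStats() resets both accumulators and both counters.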
void PLearn::RBMMatrixTransposeConnection::update ( const Vec & pos_down_values, const Vec & pos_up_values, const Vec & neg_down_values, const Vec & neg_up_values ) [virtual]
Updates parameters according to contrastive divergence gradient, not using the statistics but the explicit values passed.
Reimplemented from PLearn::RBMConnection.
Definition at line 213 of file RBMMatrixTransposeConnection.cc.
References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), i, j, learn_scale, PLearn::RBMConnection::learning_rate, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLearn::RBMConnection::momentum, PLASSERT, PLASSERT_MSG, PLERROR, rbm_matrix_connection, PLearn::TMat< T >::resize(), w, weights, weights_inc, and PLearn::TMat< T >::width().
{
    if( learn_scale )
        PLERROR("In RBMMatrixTransposeConnection::update(): not implemented "
                "for learned scale");

    PLASSERT_MSG( rbm_matrix_connection,
        "RBMMatrixTransposeConnection must be given an rbm_matrix_connection.\n");
    // weights -= learning_rate * ( h_0 v_0' - h_1 v_1' );
    // or:
    // weights[i][j] += learning_rate * (h_1[i] v_1[j] - h_0[i] v_0[j]);

    int l = weights.length();
    int w = weights.width();
    PLASSERT( pos_up_values.length() == l );
    PLASSERT( neg_up_values.length() == l );
    PLASSERT( pos_down_values.length() == w );
    PLASSERT( neg_down_values.length() == w );

    real* w_i = weights.data();
    real* puv_i = pos_up_values.data();
    real* nuv_i = neg_up_values.data();
    real* pdv = pos_down_values.data();
    real* ndv = neg_down_values.data();
    int w_mod = weights.mod();

    if( momentum == 0. )
    {
        for( int i=0 ; i<l ; i++, w_i += w_mod, puv_i++, nuv_i++ )
            for( int j=0 ; j<w ; j++ )
                w_i[j] += learning_rate * (*nuv_i * ndv[j] - *puv_i * pdv[j]);
    }
    else
    {
        // ensure that weights_inc has the right size
        weights_inc.resize( l, w );

        // The update rule becomes:
        // weights_inc = momentum * weights_inc
        //               - learning_rate * ( h_0 v_0' - h_1 v_1' );
        // weights += weights_inc;
        real* winc_i = weights_inc.data();
        int winc_mod = weights_inc.mod();
        for( int i=0 ; i<l ; i++, w_i += w_mod, winc_i += winc_mod,
                             puv_i++, nuv_i++ )
            for( int j=0 ; j<w ; j++ )
            {
                winc_i[j] = momentum * winc_i[j]
                    + learning_rate * (*nuv_i * ndv[j] - *puv_i * pdv[j]);
                w_i[j] += winc_i[j];
            }
    }
}
StaticInitializer PLearn::RBMMatrixTransposeConnection::_static_initializer_ [static]
Reimplemented from PLearn::RBMConnection.
Definition at line 190 of file RBMMatrixTransposeConnection.h.
bool PLearn::RBMMatrixTransposeConnection::learn_scale
Indication that the scale of the weight matrix should be learned.
Definition at line 69 of file RBMMatrixTransposeConnection.h.
Referenced by bpropUpdate(), computeProduct(), computeProducts(), declareOptions(), forget(), and update().
PP< RBMMatrixConnection > PLearn::RBMMatrixTransposeConnection::rbm_matrix_connection
RBMMatrixConnection from which the weights are taken.
Definition at line 66 of file RBMMatrixTransposeConnection.h.
Referenced by bpropUpdate(), build_(), computeProduct(), computeProducts(), declareOptions(), forget(), makeDeepCopyFromShallowCopy(), makeParametersPointHere(), PLearn::DeepBeliefNet::train(), and update().
real PLearn::RBMMatrixTransposeConnection::scale
Learned scale for weight matrix.
Definition at line 74 of file RBMMatrixTransposeConnection.h.
Referenced by bpropUpdate(), computeProduct(), computeProducts(), declareOptions(), and forget().
Mat PLearn::RBMMatrixTransposeConnection::weights
Matrix containing unit-to-unit weights.
Definition at line 63 of file RBMMatrixTransposeConnection.h.
Referenced by bpropUpdate(), build_(), computeProduct(), computeProducts(), makeDeepCopyFromShallowCopy(), makeParametersPointHere(), nParameters(), and update().
Mat PLearn::RBMMatrixTransposeConnection::weights_inc
Used if momentum != 0.
Definition at line 83 of file RBMMatrixTransposeConnection.h.
Referenced by build_(), makeDeepCopyFromShallowCopy(), and update().
Mat PLearn::RBMMatrixTransposeConnection::weights_neg_stats
Accumulates negative contribution to the weights' gradient.
Definition at line 80 of file RBMMatrixTransposeConnection.h.
Referenced by accumulateNegStats(), build_(), clearStats(), makeDeepCopyFromShallowCopy(), and update().
Mat PLearn::RBMMatrixTransposeConnection::weights_pos_stats
Accumulates positive contribution to the weights' gradient.
Definition at line 77 of file RBMMatrixTransposeConnection.h.
Referenced by accumulatePosStats(), build_(), clearStats(), makeDeepCopyFromShallowCopy(), and update().