PLearn 0.1
PLearn::RBMMultinomialLayer Class Reference
Layer in an RBM consisting of one multinomial unit.
#include <RBMMultinomialLayer.h>


Public Member Functions
    RBMMultinomialLayer ()
        Default constructor.
    RBMMultinomialLayer (int the_size)
        Constructor from the number of units in the multinomial.
    virtual void getUnitActivations (int i, PP< RBMParameters > rbmp, int offset=0)
        Uses "rbmp" to obtain the activations of unit "i" of this layer.
    virtual void getAllActivations (PP< RBMParameters > rbmp, int offset=0)
        Uses "rbmp" to obtain the activations of all units in this layer.
    virtual void generateSample ()
        generate a sample, and update the sample field
    virtual void computeExpectation ()
        compute the expectation
    virtual void bpropUpdate (const Vec &input, const Vec &output, Vec &input_gradient, const Vec &output_gradient)
        back-propagates the output gradient to the input
    virtual string classname () const
    virtual OptionList & getOptionList () const
    virtual OptionMap & getOptionMap () const
    virtual RemoteMethodMap & getRemoteMethodMap () const
    virtual RBMMultinomialLayer * deepCopy (CopiesMap &copies) const
    virtual void build ()
        Post-constructor.
    virtual void makeDeepCopyFromShallowCopy (CopiesMap &copies)
        Transforms a shallow copy into a deep copy.
    RBMMultinomialLayer (real the_learning_rate=0.)
        Default constructor.
    RBMMultinomialLayer (int the_size, real the_learning_rate=0.)
        Constructor from the number of units in the multinomial.
    virtual void generateSample ()
        generate a sample, and update the sample field
    virtual void generateSamples ()
        batch version
    virtual void computeExpectation ()
        compute the expectation
    virtual void computeExpectations ()
        batch version
    virtual void fprop (const Vec &input, Vec &output) const
        forward propagation
    virtual void fprop (const Vec &input, const Vec &rbm_bias, Vec &output) const
        forward propagation with provided bias
    virtual void bpropUpdate (const Vec &input, const Vec &output, Vec &input_gradient, const Vec &output_gradient, bool accumulate=false)
        back-propagates the output gradient to the input
    virtual void bpropUpdate (const Mat &inputs, const Mat &outputs, Mat &input_gradients, const Mat &output_gradients, bool accumulate=false)
        Back-propagate the output gradient to the input, and update parameters.
    virtual void bpropUpdate (const Vec &input, const Vec &rbm_bias, const Vec &output, Vec &input_gradient, Vec &rbm_bias_gradient, const Vec &output_gradient)
        back-propagates the output gradient to the input and the bias
    virtual real fpropNLL (const Vec &target)
        Computes the negative log-likelihood of target given the internal activations of the layer.
    virtual void fpropNLL (const Mat &targets, const Mat &costs_column)
    virtual void bpropNLL (const Vec &target, real nll, Vec &bias_gradient)
        Computes the gradient of the negative log-likelihood of target with respect to the layer's bias, given the internal activations.
    virtual void bpropNLL (const Mat &targets, const Mat &costs_column, Mat &bias_gradients)
    virtual real energy (const Vec &unit_values) const
    virtual real freeEnergyContribution (const Vec &unit_activations) const
        Computes -log( sum_{i=0}^{size-1} exp(a_i) ). This quantity is used for computing the free energy of a sample x in the OTHER layer of an RBM, from which unit_activations was computed.
    virtual void freeEnergyContributionGradient (const Vec &unit_activations, Vec &unit_activations_gradient, real output_gradient=1, bool accumulate=false) const
        Computes the gradient of the result of freeEnergyContribution with respect to unit_activations.
    virtual int getConfigurationCount ()
        Returns the number of different configurations the layer can be in.
    virtual void getConfiguration (int conf_index, Vec &output)
        Computes the conf_index configuration of the layer.
    virtual string classname () const
    virtual OptionList & getOptionList () const
    virtual OptionMap & getOptionMap () const
    virtual RemoteMethodMap & getRemoteMethodMap () const
    virtual RBMMultinomialLayer * deepCopy (CopiesMap &copies) const
    virtual void build ()
        Post-constructor.
    virtual void makeDeepCopyFromShallowCopy (CopiesMap &copies)
        Transforms a shallow copy into a deep copy.
Static Public Member Functions
    static string _classname_ ()
    static OptionList & _getOptionList_ ()
    static RemoteMethodMap & _getRemoteMethodMap_ ()
    static Object * _new_instance_for_typemap_ ()
    static bool _isa_ (const Object *o)
    static void _static_initialize_ ()
    static const PPath & declaringFile ()
    static string _classname_ ()
    static OptionList & _getOptionList_ ()
    static RemoteMethodMap & _getRemoteMethodMap_ ()
    static Object * _new_instance_for_typemap_ ()
    static bool _isa_ (const Object *o)
    static void _static_initialize_ ()
    static const PPath & declaringFile ()
Static Public Attributes
    static StaticInitializer _static_initializer_
Static Protected Member Functions
    static void declareOptions (OptionList &ol)
        Declares the class options.
    static void declareOptions (OptionList &ol)
        Declares the class options.
Protected Attributes
    Vec tmp_softmax
Private Types
    typedef RBMLayer inherited
    typedef RBMLayer inherited
Private Member Functions
    void build_ ()
        This does the actual building.
    void build_ ()
        This does the actual building.
Detailed Description
Layer in an RBM consisting of one multinomial unit: its state is a one-hot vector of length 'size', with configuration probabilities given by a softmax of the activations.
Definition at line 54 of file DEPRECATED/RBMMultinomialLayer.h.
Member Typedef Documentation
typedef RBMLayer PLearn::RBMMultinomialLayer::inherited [private]
Reimplemented from PLearn::RBMLayer.
Definition at line 56 of file DEPRECATED/RBMMultinomialLayer.h.
typedef RBMLayer PLearn::RBMMultinomialLayer::inherited [private]
Reimplemented from PLearn::RBMLayer.
Definition at line 55 of file RBMMultinomialLayer.h.
Constructor & Destructor Documentation
PLearn::RBMMultinomialLayer::RBMMultinomialLayer ( )
Default constructor.
PLearn::RBMMultinomialLayer::RBMMultinomialLayer ( int the_size )
Constructor from the number of units in the multinomial.
Definition at line 55 of file DEPRECATED/RBMMultinomialLayer.cc.
References PLearn::TVec< T >::resize(), and PLearn::sample().
{
size = the_size;
units_types = string( the_size, 'l' );
activations.resize( the_size );
sample.resize( the_size );
expectation.resize( the_size );
expectation_is_up_to_date = false;
}

PLearn::RBMMultinomialLayer::RBMMultinomialLayer ( real the_learning_rate = 0. )
Default constructor.
Definition at line 53 of file RBMMultinomialLayer.cc.
:
inherited( the_learning_rate )
{
}
PLearn::RBMMultinomialLayer::RBMMultinomialLayer ( int the_size, real the_learning_rate = 0. )
Constructor from the number of units in the multinomial.
Definition at line 58 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::activation, PLearn::RBMLayer::bias, PLearn::RBMLayer::bias_neg_stats, PLearn::RBMLayer::bias_pos_stats, PLearn::RBMLayer::expectation, PLearn::TVec< T >::resize(), PLearn::RBMLayer::sample, and PLearn::RBMLayer::size.
:
inherited( the_learning_rate )
{
size = the_size;
activation.resize( the_size );
sample.resize( the_size );
expectation.resize( the_size );
bias.resize( the_size );
bias_pos_stats.resize( the_size );
bias_neg_stats.resize( the_size );
}

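A minimal usage sketch for orientation: only the constructor signature and build() documented on this page come from the documentation above; the include path, the 10-unit size, and the 0.01 learning rate are illustrative assumptions.

// Hedged sketch, not taken from the PLearn sources: build a 10-way
// multinomial layer with learning rate 0.01.
#include <plearn_learners/online/RBMMultinomialLayer.h>   // assumed path

using namespace PLearn;

void example_build_layer()
{
    PP<RBMMultinomialLayer> layer = new RBMMultinomialLayer(10, 0.01);
    layer->build();   // post-constructor: inherited::build() then build_()
}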
Member Function Documentation
string PLearn::RBMMultinomialLayer::_classname_ ( ) [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
static string PLearn::RBMMultinomialLayer::_classname_ ( ) [static]
Reimplemented from PLearn::RBMLayer.
OptionList & PLearn::RBMMultinomialLayer::_getOptionList_ ( ) [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
static OptionList & PLearn::RBMMultinomialLayer::_getOptionList_ ( ) [static]
Reimplemented from PLearn::RBMLayer.
RemoteMethodMap & PLearn::RBMMultinomialLayer::_getRemoteMethodMap_ ( ) [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
static RemoteMethodMap & PLearn::RBMMultinomialLayer::_getRemoteMethodMap_ ( ) [static]
Reimplemented from PLearn::RBMLayer.
bool PLearn::RBMMultinomialLayer::_isa_ ( const Object * o ) [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
static bool PLearn::RBMMultinomialLayer::_isa_ ( const Object * o ) [static]
Reimplemented from PLearn::RBMLayer.
static Object * PLearn::RBMMultinomialLayer::_new_instance_for_typemap_ ( ) [static]
Reimplemented from PLearn::Object.
Object * PLearn::RBMMultinomialLayer::_new_instance_for_typemap_ ( ) [static]
Reimplemented from PLearn::Object.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
void PLearn::RBMMultinomialLayer::_static_initialize_ ( ) [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
static void PLearn::RBMMultinomialLayer::_static_initialize_ ( ) [static]
Reimplemented from PLearn::RBMLayer.
void PLearn::RBMMultinomialLayer::bpropNLL ( const Vec & target, real nll, Vec & bias_gradient ) [virtual]
Computes the gradient of the negative log-likelihood of target with respect to the layer's bias, given the internal activations.
Reimplemented from PLearn::RBMLayer.
Definition at line 377 of file RBMMultinomialLayer.cc.
References computeExpectation(), PLearn::RBMLayer::expectation, PLearn::OnlineLearningModule::input_size, PLASSERT, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), PLearn::RBMLayer::size, and PLearn::substract().
{
computeExpectation();
PLASSERT( target.size() == input_size );
bias_gradient.resize( size );
// bias_gradient = expectation - target
substract(expectation, target, bias_gradient);
}

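Why the bias gradient reduces to expectation - target: for a softmax output with a cross-entropy cost, a standard derivation (not quoted from the PLearn sources) gives

\[
\frac{\partial}{\partial a_i}\Big(-\sum_j t_j \log p_j\Big) = p_i - t_i ,
\qquad p = \operatorname{softmax}(a), \quad \textstyle\sum_j t_j = 1 ,
\]

and since the bias enters the activation additively, the same expression is the gradient with respect to the bias; this is exactly what substract(expectation, target, bias_gradient) stores element-wise.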
void PLearn::RBMMultinomialLayer::bpropNLL ( const Mat & targets, const Mat & costs_column, Mat & bias_gradients ) [virtual]
Reimplemented from PLearn::RBMLayer.
Definition at line 389 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::batch_size, computeExpectations(), PLearn::RBMLayer::expectations, PLearn::OnlineLearningModule::input_size, PLearn::TMat< T >::length(), PLASSERT, PLearn::TMat< T >::resize(), PLearn::RBMLayer::size, PLearn::substract(), and PLearn::TMat< T >::width().
{
computeExpectations();
PLASSERT( targets.width() == input_size );
PLASSERT( targets.length() == batch_size );
PLASSERT( costs_column.width() == 1 );
PLASSERT( costs_column.length() == batch_size );
bias_gradients.resize( batch_size, size );
// bias_gradients = expectations - targets
substract(expectations, targets, bias_gradients);
}

void PLearn::RBMMultinomialLayer::bpropUpdate ( const Vec & input, const Vec & output, Vec & input_gradient, const Vec & output_gradient ) [virtual]
back-propagates the output gradient to the input
Implements PLearn::RBMLayer.
Definition at line 100 of file DEPRECATED/RBMMultinomialLayer.cc.
References PLERROR.
{
PLERROR( "RBMMultinomialLayer::bpropUpdate not implemented yet." );
}
void PLearn::RBMMultinomialLayer::bpropUpdate ( const Vec & input, const Vec & output, Vec & input_gradient, const Vec & output_gradient, bool accumulate = false ) [virtual]
back-propagates the output gradient to the input
Implements PLearn::RBMLayer.
Definition at line 152 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::applyBiasDecay(), b, PLearn::RBMLayer::bias, PLearn::RBMLayer::bias_inc, PLearn::TVec< T >::clear(), PLearn::TVec< T >::data(), PLearn::dot(), i, PLearn::RBMLayer::learning_rate, PLearn::RBMLayer::momentum, PLASSERT, PLASSERT_MSG, PLearn::TVec< T >::resize(), PLearn::RBMLayer::size, and PLearn::TVec< T >::size().
{
PLASSERT( input.size() == size );
PLASSERT( output.size() == size );
PLASSERT( output_gradient.size() == size );
if( accumulate )
{
PLASSERT_MSG( input_gradient.size() == size,
"Cannot resize input_gradient AND accumulate into it" );
}
else
{
input_gradient.resize( size );
input_gradient.clear();
}
if( momentum != 0. )
bias_inc.resize( size );
// input_gradient[i] =
// (output_gradient[i] - output_gradient . output) output[i]
real outg_dot_out = dot( output_gradient, output );
real* out = output.data();
real* outg = output_gradient.data();
real* ing = input_gradient.data();
real* b = bias.data();
real* binc = momentum==0?0:bias_inc.data();
for( int i=0 ; i<size ; i++ )
{
real ing_i = (outg[i] - outg_dot_out) * out[i];
ing[i] += ing_i;
if( momentum == 0. )
{
// update the bias: bias -= learning_rate * input_gradient
b[i] -= learning_rate * ing_i;
}
else
{
// The update rule becomes:
// bias_inc = momentum * bias_inc - learning_rate * input_gradient
// bias += bias_inc
binc[i] = momentum * binc[i] - learning_rate * ing_i;
b[i] += binc[i];
}
}
applyBiasDecay();
}

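The update above is the softmax Jacobian applied to the output gradient; a short derivation (standard calculus, not taken from the PLearn sources):

\[
y = \operatorname{softmax}(x), \quad
\frac{\partial y_j}{\partial x_i} = y_j(\delta_{ij} - y_i)
\;\Rightarrow\;
\frac{\partial L}{\partial x_i} = \sum_j \frac{\partial L}{\partial y_j}\, y_j(\delta_{ij} - y_i)
= \Big(\frac{\partial L}{\partial y_i} - \frac{\partial L}{\partial y}\cdot y\Big)\, y_i ,
\]

which matches input_gradient[i] = (output_gradient[i] - output_gradient . output) * output[i] in the loop above.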
void PLearn::RBMMultinomialLayer::bpropUpdate ( const Mat & inputs, const Mat & outputs, Mat & input_gradients, const Mat & output_gradients, bool accumulate = false ) [virtual]
Back-propagate the output gradient to the input, and update parameters.
Implements PLearn::RBMLayer.
Definition at line 206 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::applyBiasDecay(), b, PLearn::RBMLayer::bias, PLearn::RBMLayer::bias_inc, PLearn::TMat< T >::clear(), PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), PLearn::dot(), i, PLearn::RBMLayer::learning_rate, PLearn::TMat< T >::length(), PLearn::RBMLayer::momentum, PLASSERT, PLASSERT_MSG, PLCHECK_MSG, PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), PLearn::RBMLayer::size, and PLearn::TMat< T >::width().
{
PLASSERT( inputs.width() == size );
PLASSERT( outputs.width() == size );
PLASSERT( output_gradients.width() == size );
int mbatch_size = inputs.length();
PLASSERT( outputs.length() == mbatch_size );
PLASSERT( output_gradients.length() == mbatch_size );
if( accumulate )
{
PLASSERT_MSG( input_gradients.width() == size &&
input_gradients.length() == inputs.length(),
"Cannot resize input_gradient and accumulate into it." );
}
else
{
input_gradients.resize(inputs.length(), size);
input_gradients.clear();
}
if( momentum != 0. )
bias_inc.resize( size );
// TODO see if we can have a speed-up by reorganizing the different steps
// input_gradients[k][i] =
// (output_gradients[k][i]-output_gradients[k].outputs[k]) outputs[k][i]
real mean_lr = learning_rate / mbatch_size;
for( int k=0; k<mbatch_size; k++ )
{
real outg_dot_out = dot( output_gradients(k), outputs(k) );
real* out = outputs(k).data();
real* outg = output_gradients(k).data();
real* ing = input_gradients(k).data();
real* b = bias.data();
real* binc = momentum==0?0:bias_inc.data();
for( int i=0 ; i<size ; i++ )
{
real ing_ki = (outg[i] - outg_dot_out) * out[i];
ing[i] += ing_ki;
if( momentum == 0. )
{
// update the bias: bias -= learning_rate * input_gradient
b[i] -= mean_lr * ing_ki;
}
else
{
PLCHECK_MSG(false,
"Momentum not correctly implemented with batch");
// The update rule becomes:
// bias_inc = momentum*bias_inc - learning_rate*input_gradient
// bias += bias_inc
binc[i] = momentum * binc[i] - mean_lr * ing_ki;
b[i] += binc[i];
}
}
}
applyBiasDecay();
}

void PLearn::RBMMultinomialLayer::bpropUpdate ( const Vec & input, const Vec & rbm_bias, const Vec & output, Vec & input_gradient, Vec & rbm_bias_gradient, const Vec & output_gradient ) [virtual]
back-propagates the output gradient to the input and the bias
TODO: add "accumulate" here.
Reimplemented from PLearn::RBMLayer.
Definition at line 275 of file RBMMultinomialLayer.cc.
References PLearn::TVec< T >::data(), PLearn::dot(), i, PLASSERT, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), and PLearn::RBMLayer::size.
{
PLASSERT( input.size() == size );
PLASSERT( rbm_bias.size() == size );
PLASSERT( output.size() == size );
PLASSERT( output_gradient.size() == size );
input_gradient.resize( size );
rbm_bias_gradient.resize( size );
// input_gradient[i] =
//     (output_gradient[i] - output_gradient . output) output[i]
real outg_dot_out = dot( output_gradient, output );
real* out = output.data();
real* outg = output_gradient.data();
real* ing = input_gradient.data();
for( int i=0 ; i<size ; i++ )
ing[i] = (outg[i] - outg_dot_out) * out[i];
rbm_bias_gradient << input_gradient;
}

void PLearn::RBMMultinomialLayer::build ( ) [virtual]
Post-constructor.
The normal implementation should call simply inherited::build(), then this class's build_(). This method should be callable again at later times, after modifying some option fields to change the "architecture" of the object.
Reimplemented from PLearn::RBMLayer.
Definition at line 132 of file DEPRECATED/RBMMultinomialLayer.cc.
{
inherited::build();
build_();
}
virtual void PLearn::RBMMultinomialLayer::build ( ) [virtual]
Post-constructor.
The normal implementation should call simply inherited::build(), then this class's build_(). This method should be callable again at later times, after modifying some option fields to change the "architecture" of the object.
Reimplemented from PLearn::RBMLayer.
void PLearn::RBMMultinomialLayer::build_ ( ) [private]
This does the actual building.
Reimplemented from PLearn::RBMLayer.
Definition at line 119 of file DEPRECATED/RBMMultinomialLayer.cc.
References PLearn::TVec< T >::resize(), and PLearn::sample().
{
if( size < 0 )
size = int(units_types.size());
if( size != (int) units_types.size() )
units_types = string( size, 'l' );
activations.resize( size );
sample.resize( size );
expectation.resize( size );
expectation_is_up_to_date = false;
}

void PLearn::RBMMultinomialLayer::build_ ( ) [private]
This does the actual building.
Reimplemented from PLearn::RBMLayer.
virtual string PLearn::RBMMultinomialLayer::classname ( ) const [virtual]
Reimplemented from PLearn::Object.
string PLearn::RBMMultinomialLayer::classname ( ) const [virtual]
Reimplemented from PLearn::Object.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
void PLearn::RBMMultinomialLayer::computeExpectation ( ) [virtual]
compute the expectation
Implements PLearn::RBMLayer.
Definition at line 90 of file DEPRECATED/RBMMultinomialLayer.cc.
References PLearn::softmaxMinus().
Referenced by bpropNLL(), and fpropNLL().
{
if( expectation_is_up_to_date )
return;
// expectation = softmax(-activations)
softmaxMinus(activations, expectation);
expectation_is_up_to_date = true;
}


virtual void PLearn::RBMMultinomialLayer::computeExpectation ( ) [virtual]
compute the expectation
Implements PLearn::RBMLayer.
void PLearn::RBMMultinomialLayer::computeExpectations ( ) [virtual]
batch version
Implements PLearn::RBMLayer.
Definition at line 110 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::activations, PLearn::RBMLayer::batch_size, PLearn::RBMLayer::expectations, PLearn::RBMLayer::expectations_are_up_to_date, PLearn::TMat< T >::length(), PLASSERT, PLearn::RBMLayer::size, PLearn::softmax(), and PLearn::TMat< T >::width().
Referenced by bpropNLL(), and fpropNLL().
{
if( expectations_are_up_to_date )
return;
PLASSERT( expectations.width() == size
&& expectations.length() == batch_size );
// expectations(k) = softmax(activations(k))
for (int k = 0; k < batch_size; k++)
softmax(activations(k), expectations(k));
expectations_are_up_to_date = true;
}

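For reference, the per-row expectation computed above is the standard softmax of the activations (whereas the deprecated computeExpectation earlier on this page applies the softmax to the negated activations):

\[
\text{expectations}(k)_i \;=\; \frac{e^{a_{k,i}}}{\sum_{j=0}^{\text{size}-1} e^{a_{k,j}}},
\qquad k = 0, \dots, \text{batch\_size}-1 .
\]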

static void PLearn::RBMMultinomialLayer::declareOptions ( OptionList & ol ) [static, protected]
Declares the class options.
Reimplemented from PLearn::RBMLayer.
void PLearn::RBMMultinomialLayer::declareOptions ( OptionList & ol ) [static, protected]
Declares the class options.
Reimplemented from PLearn::RBMLayer.
Definition at line 108 of file DEPRECATED/RBMMultinomialLayer.cc.
{
/*
declareOption(ol, "size", &RBMMultinomialLayer::size,
OptionBase::buildoption,
"Number of units.");
*/
// Now call the parent class' declareOptions
inherited::declareOptions(ol);
}
static const PPath & PLearn::RBMMultinomialLayer::declaringFile ( ) [inline, static]
Reimplemented from PLearn::RBMLayer.
Definition at line 98 of file DEPRECATED/RBMMultinomialLayer.h.
static const PPath & PLearn::RBMMultinomialLayer::declaringFile ( ) [inline, static]
Reimplemented from PLearn::RBMLayer.
Definition at line 145 of file RBMMultinomialLayer.h.
virtual RBMMultinomialLayer * PLearn::RBMMultinomialLayer::deepCopy ( CopiesMap & copies ) const [virtual]
Reimplemented from PLearn::RBMLayer.
RBMMultinomialLayer * PLearn::RBMMultinomialLayer::deepCopy ( CopiesMap & copies ) const [virtual]
Reimplemented from PLearn::RBMLayer.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
real PLearn::RBMMultinomialLayer::energy ( const Vec & unit_values ) const [virtual]
Reimplemented from PLearn::RBMLayer.
Definition at line 432 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::bias, and PLearn::dot().

void PLearn::RBMMultinomialLayer::fprop ( const Vec & input, Vec & output ) const [virtual]
forward propagation
Reimplemented from PLearn::RBMLayer.
Definition at line 126 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::bias, PLearn::OnlineLearningModule::input_size, PLearn::OnlineLearningModule::output_size, PLASSERT, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), and PLearn::softmax().
{
PLASSERT( input.size() == input_size );
output.resize( output_size );
// inefficient
softmax( input+bias, output );
}

void PLearn::RBMMultinomialLayer::fprop ( const Vec & input, const Vec & rbm_bias, Vec & output ) const [virtual]
forward propagation with provided bias
Reimplemented from PLearn::RBMLayer.
Definition at line 138 of file RBMMultinomialLayer.cc.
References PLearn::OnlineLearningModule::input_size, PLearn::OnlineLearningModule::output_size, PLASSERT, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), and PLearn::softmax().
{
PLASSERT( input.size() == input_size );
PLASSERT( rbm_bias.size() == input_size );
output.resize( output_size );
// inefficient
softmax( input+rbm_bias, output );
}

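A hedged usage sketch of the two fprop overloads, continuing the construction sketch given after the constructor documentation; the vector lengths follow the PLASSERTs in the bodies above, everything else (names, uninitialized contents) is illustrative.

// Illustrative only: exercise both fprop overloads on a built layer.
void example_fprop(PP<RBMMultinomialLayer> layer)
{
    Vec input(layer->size);        // activation coming from the RBM connection
    Vec rbm_bias(layer->size);     // externally supplied bias, same length
    Vec output;                    // resized to output_size inside fprop

    layer->fprop(input, output);            // output = softmax(input + layer bias)
    layer->fprop(input, rbm_bias, output);  // output = softmax(input + rbm_bias)
}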
void PLearn::RBMMultinomialLayer::fpropNLL ( const Mat & targets, const Mat & costs_column ) [virtual]
Reimplemented from PLearn::RBMLayer.
Definition at line 336 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::batch_size, computeExpectations(), PLearn::RBMLayer::expectation, PLearn::RBMLayer::expectations, PLearn::fast_exact_is_equal(), PLearn::TMat< T >::hasMissing(), i, PLearn::OnlineLearningModule::input_size, PLearn::is_equal(), PLearn::TMat< T >::length(), PLearn::min(), pl_log, PLASSERT, PLASSERT_MSG, PLERROR, PLearn::RBMLayer::size, PLearn::sum(), and PLearn::TMat< T >::width().
{
computeExpectations();
PLASSERT( targets.width() == input_size );
PLASSERT( targets.length() == batch_size );
PLASSERT( costs_column.width() == 1 );
PLASSERT( costs_column.length() == batch_size );
real target_i, expectation_i;
for (int k=0; k<batch_size; k++) // loop over minibatch
{
#ifdef BOUNDCHECK
if (!targets(k).hasMissing())
{
PLASSERT_MSG( min(targets(k)) >= 0.,
"Elements of \"targets\" should be positive" );
// Ensure the distribution probabilities sum to 1. We relax a
// bit the default tolerance as probabilities using
// exponentials could suffer numerical imprecisions.
if (!is_equal( sum(targets(k)), 1., 1., 1e-5, 1e-5 ))
PLERROR("In RBMMultinomialLayer::fpropNLL - Elements of"
" \"target\" should sum to 1 (found a sum = %f at row"
" %d)",
sum(targets(k)), k);
}
#endif
real nll = 0;
real* expectation = expectations[k];
real* target = targets[k];
for(int i=0; i<size; i++)
{
target_i = target[i];
expectation_i = expectation[i];
if(!fast_exact_is_equal(target_i, 0.0))
nll -= target_i * pl_log(expectation_i);
}
costs_column(k, 0) = nll;
}
}

real PLearn::RBMMultinomialLayer::fpropNLL ( const Vec & target ) [virtual]
Computes the negative log-likelihood of target given the internal activations of the layer.
Reimplemented from PLearn::RBMLayer.
Definition at line 303 of file RBMMultinomialLayer.cc.
References computeExpectation(), PLearn::RBMLayer::expectation, PLearn::fast_exact_is_equal(), PLearn::TVec< T >::hasMissing(), i, PLearn::OnlineLearningModule::input_size, PLearn::is_equal(), PLearn::min(), pl_log, PLASSERT, PLASSERT_MSG, PLERROR, PLearn::TVec< T >::size(), PLearn::RBMLayer::size, and PLearn::sum().
{
computeExpectation();
PLASSERT( target.size() == input_size );
#ifdef BOUNDCHECK
if (!target.hasMissing())
{
PLASSERT_MSG( min(target) >= 0.,
"Elements of \"target\" should be positive" );
// Ensure the distribution probabilities sum to 1. We relax a
// bit the default tolerance as probabilities using
// exponentials could suffer numerical imprecisions.
if (!is_equal( sum(target), 1., 1., 1e-5, 1e-5 ))
PLERROR("In RBMMultinomialLayer::fpropNLL - Elements of \"target\""
" should sum to 1 (found a sum = %f)",
sum(target));
}
#endif
real nll = 0;
real target_i, expectation_i;
for (int i=0; i<size; i++)
{
target_i = target[i];
expectation_i = expectation[i];
if(!fast_exact_is_equal(target_i, 0.0))
nll -= target_i * pl_log(expectation_i);
}
return nll;
}

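In formula form, the cost returned above is the cross-entropy between the target distribution and the layer's expectation (a standard identity, stated here for clarity):

\[
\mathrm{NLL}(t) \;=\; -\sum_{i=0}^{\text{size}-1} t_i \,\log p_i ,
\]

with p the expectation vector (the softmax of the activation); terms with t_i = 0 are skipped in the loop, which also avoids evaluating log of a possibly zero probability.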
real PLearn::RBMMultinomialLayer::freeEnergyContribution ( const Vec & unit_activations ) const [virtual]
Computes -log( sum_{i=0}^{size-1} exp(a_i) ). This quantity is used for computing the free energy of a sample x in the OTHER layer of an RBM, from which unit_activations was computed.
Reimplemented from PLearn::RBMLayer.
Definition at line 437 of file RBMMultinomialLayer.cc.
References PLearn::logadd().
{
// result = -log(\sum_{i=0}^{size-1} exp(a_i))
return -logadd(unit_activations);
}

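For clarity, a self-contained illustration of the log-sum-exp quantity that logadd() returns here; this is a sketch of the math in plain C++, not PLearn's implementation of logadd.

// Standalone illustration (not PLearn code): numerically stable log-sum-exp.
// freeEnergyContribution(a) corresponds to -log_sum_exp(a).
#include <algorithm>
#include <cmath>
#include <vector>

double log_sum_exp(const std::vector<double>& a)
{
    // Shift by the maximum activation so exp() cannot overflow.
    const double m = *std::max_element(a.begin(), a.end());
    double s = 0.0;
    for (double ai : a)
        s += std::exp(ai - m);
    return m + std::log(s);
}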
void PLearn::RBMMultinomialLayer::freeEnergyContributionGradient ( const Vec & unit_activations, Vec & unit_activations_gradient, real output_gradient = 1, bool accumulate = false ) const [virtual]
Computes the gradient of the result of freeEnergyContribution with respect to unit_activations. Optionally, a gradient with respect to freeEnergyContribution can be given.
Reimplemented from PLearn::RBMLayer.
Definition at line 444 of file RBMMultinomialLayer.cc.
References PLearn::TVec< T >::clear(), PLearn::TVec< T >::data(), PLASSERT, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), PLearn::RBMLayer::size, PLearn::softmax(), and tmp_softmax.
{
PLASSERT( unit_activations.size() == size );
unit_activations_gradient.resize( size );
if( !accumulate ) unit_activations_gradient.clear();
tmp_softmax.resize( size );
softmax(unit_activations, tmp_softmax);
real* ga = unit_activations_gradient.data();
real* s = tmp_softmax.data();
for (int i=0; i<size; i++)
ga[i] -= output_gradient * s[i];
}

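The loop above implements the gradient of the free-energy contribution; a standard identity (not quoted from the sources) explains the softmax that appears:

\[
\frac{\partial}{\partial a_i}\Big(-\log \sum_j e^{a_j}\Big)
= -\frac{e^{a_i}}{\sum_j e^{a_j}}
= -\operatorname{softmax}(a)_i ,
\]

which is why the code accumulates -output_gradient * s[i] with s = softmax(unit_activations).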
void PLearn::RBMMultinomialLayer::generateSample ( ) [virtual]
generate a sample, and update the sample field
Implements PLearn::RBMLayer.
Definition at line 82 of file DEPRECATED/RBMMultinomialLayer.cc.
References PLearn::fill_one_hot(), i, and PLearn::sample().
{
computeExpectation();
int i = random_gen->multinomial_sample( expectation );
fill_one_hot( sample, i, 0., 1. );
}

virtual void PLearn::RBMMultinomialLayer::generateSample ( ) [virtual]
generate a sample, and update the sample field
Implements PLearn::RBMLayer.
void PLearn::RBMMultinomialLayer::generateSamples ( ) [virtual]
batch version
Implements PLearn::RBMLayer.
Definition at line 83 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::batch_size, PLearn::RBMLayer::expectations, PLearn::RBMLayer::expectations_are_up_to_date, PLearn::fill_one_hot(), PLearn::TMat< T >::length(), PLASSERT, PLASSERT_MSG, PLCHECK_MSG, PLearn::RBMLayer::random_gen, PLearn::RBMLayer::samples, PLearn::RBMLayer::size, and PLearn::TMat< T >::width().
{
PLASSERT_MSG(random_gen,
"random_gen should be initialized before generating samples");
PLCHECK_MSG(expectations_are_up_to_date, "Expectations should be computed "
"before calling generateSamples()");
PLASSERT( samples.width() == size && samples.length() == batch_size );
for (int k = 0; k < batch_size; k++)
{
int i = random_gen->multinomial_sample( expectations(k) );
fill_one_hot( samples(k), i, real(0.), real(1.) );
}
}

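Each generated row is therefore a one-hot vector drawn from the multinomial distribution given by the matching row of expectations; in other words (a restatement for clarity, not quoted from the sources):

\[
P\big(\text{samples}(k) = e_i\big) = \text{expectations}(k)_i ,
\qquad i = 0, \dots, \text{size}-1 .
\]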
void PLearn::RBMMultinomialLayer::getAllActivations ( PP< RBMParameters > rbmp, int offset = 0 ) [virtual]
Uses "rbmp" to obtain the activations of all units in this layer.
Unit 0 of this layer corresponds to unit "offset" of "rbmp".
Implements PLearn::RBMLayer.
Definition at line 75 of file DEPRECATED/RBMMultinomialLayer.cc.
{
rbmp->computeUnitActivations( offset, size, activations );
expectation_is_up_to_date = false;
}
void PLearn::RBMMultinomialLayer::getConfiguration ( int conf_index, Vec & output ) [virtual]
Computes the conf_index configuration of the layer.
Reimplemented from PLearn::RBMLayer.
Definition at line 465 of file RBMMultinomialLayer.cc.
References getConfigurationCount(), i, PLearn::TVec< T >::length(), PLASSERT, and PLearn::RBMLayer::size.
{
PLASSERT( output.length() == size );
PLASSERT( conf_index >= 0 && conf_index < getConfigurationCount() );
for ( int i = 0; i < size; ++i ) {
output[i] = i == conf_index ? 1 : 0;
}
}

int PLearn::RBMMultinomialLayer::getConfigurationCount ( ) [virtual]
Returns the number of different configurations the layer can be in.
Reimplemented from PLearn::RBMLayer.
Definition at line 460 of file RBMMultinomialLayer.cc.
References PLearn::RBMLayer::size.
Referenced by getConfiguration().
{
return size;
}

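A hedged sketch of enumerating every configuration of the layer with the two methods documented above; the helper name and the use of energy() in the comment are illustrative assumptions.

// Illustrative only: visit the 'size' one-hot configurations of the layer.
void example_enumerate_configurations(PP<RBMMultinomialLayer> layer)
{
    Vec conf(layer->size);
    for (int c = 0; c < layer->getConfigurationCount(); c++)
    {
        layer->getConfiguration(c, conf);  // conf becomes the one-hot vector e_c
        // ... evaluate something on conf, e.g. layer->energy(conf) ...
    }
}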
virtual OptionList & PLearn::RBMMultinomialLayer::getOptionList ( ) const [virtual]
Reimplemented from PLearn::Object.
OptionList & PLearn::RBMMultinomialLayer::getOptionList ( ) const [virtual]
Reimplemented from PLearn::Object.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
OptionMap & PLearn::RBMMultinomialLayer::getOptionMap ( ) const [virtual]
Reimplemented from PLearn::Object.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
virtual OptionMap & PLearn::RBMMultinomialLayer::getOptionMap ( ) const [virtual]
Reimplemented from PLearn::Object.
virtual RemoteMethodMap & PLearn::RBMMultinomialLayer::getRemoteMethodMap ( ) const [virtual]
Reimplemented from PLearn::Object.
RemoteMethodMap & PLearn::RBMMultinomialLayer::getRemoteMethodMap ( ) const [virtual]
Reimplemented from PLearn::Object.
Definition at line 49 of file DEPRECATED/RBMMultinomialLayer.cc.
void PLearn::RBMMultinomialLayer::getUnitActivations ( int i, PP< RBMParameters > rbmp, int offset = 0 ) [virtual]
Uses "rbmp" to obtain the activations of unit "i" of this layer.
This activation vector is computed by the "i+offset"-th unit of "rbmp"
Implements PLearn::RBMLayer.
Definition at line 67 of file DEPRECATED/RBMMultinomialLayer.cc.
References PLearn::TVec< T >::subVec().
{
Vec activation = activations.subVec( i, 1 );
rbmp->computeUnitActivations( i+offset, 1, activation );
expectation_is_up_to_date = false;
}

virtual void PLearn::RBMMultinomialLayer::makeDeepCopyFromShallowCopy ( CopiesMap & copies ) [virtual]
Transforms a shallow copy into a deep copy.
Reimplemented from PLearn::RBMLayer.
void PLearn::RBMMultinomialLayer::makeDeepCopyFromShallowCopy ( CopiesMap & copies ) [virtual]
Transforms a shallow copy into a deep copy.
Reimplemented from PLearn::RBMLayer.
Definition at line 139 of file DEPRECATED/RBMMultinomialLayer.cc.
{
inherited::makeDeepCopyFromShallowCopy(copies);
}
Member Data Documentation
static StaticInitializer PLearn::RBMMultinomialLayer::_static_initializer_ [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 98 of file DEPRECATED/RBMMultinomialLayer.h.
Vec PLearn::RBMMultinomialLayer::tmp_softmax [mutable, protected]
Definition at line 155 of file RBMMultinomialLayer.h.
Referenced by freeEnergyContributionGradient().