PLearn 0.1
RBM layer with tree-structured groups of units.
#include <RBMWoodsLayer.h>
Public Member Functions
RBMWoodsLayer (real the_learning_rate=0.)
    Default constructor.
virtual void generateSample ()
    generate a sample, and update the sample field
virtual void generateSamples ()
    Inherited.
virtual void computeProbabilisticClustering (Vec &prob_clusters)
virtual void computeExpectation ()
    Compute marginal expectations of all units.
virtual void computeExpectations ()
    Compute marginal mini-batch expectations of all units.
virtual void fprop (const Vec &input, Vec &output) const
    forward propagation
virtual void fprop (const Mat &inputs, Mat &outputs)
    Batch forward propagation.
virtual void fprop (const Vec &input, const Vec &rbm_bias, Vec &output) const
    forward propagation with provided bias
virtual void bpropUpdate (const Vec &input, const Vec &output, Vec &input_gradient, const Vec &output_gradient, bool accumulate=false)
    back-propagates the output gradient to the input
virtual void bpropUpdate (const Vec &input, const Vec &rbm_bias, const Vec &output, Vec &input_gradient, Vec &rbm_bias_gradient, const Vec &output_gradient)
    back-propagates the output gradient to the input and the bias
virtual void bpropUpdate (const Mat &inputs, const Mat &outputs, Mat &input_gradients, const Mat &output_gradients, bool accumulate=false)
    Back-propagate the output gradient to the input, and update parameters.
virtual real fpropNLL (const Vec &target)
    Computes the negative log-likelihood of target given the internal activations of the layer.
virtual void fpropNLL (const Mat &targets, const Mat &costs_column)
virtual void bpropNLL (const Vec &target, real nll, Vec &bias_gradient)
    Computes the gradient of the negative log-likelihood of target with respect to the layer's bias, given the internal activations.
virtual void bpropNLL (const Mat &targets, const Mat &costs_column, Mat &bias_gradients)
virtual real energy (const Vec &unit_values) const
    compute -bias' unit_values
virtual real freeEnergyContribution (const Vec &unit_activations) const
    Computes -log( sum over configurations h of exp(h' unit_activations) ).
virtual void freeEnergyContributionGradient (const Vec &unit_activations, Vec &unit_activations_gradient, real output_gradient=1, bool accumulate=false) const
    Computes the gradient of the result of freeEnergyContribution with respect to unit_activations.
virtual int getConfigurationCount ()
    Returns the number of different configurations the layer can be in.
virtual void getConfiguration (int conf_index, Vec &output)
    Computes the conf_index configuration of the layer.
virtual string classname () const
virtual OptionList & getOptionList () const
virtual OptionMap & getOptionMap () const
virtual RemoteMethodMap & getRemoteMethodMap () const
virtual RBMWoodsLayer * deepCopy (CopiesMap &copies) const
virtual void build ()
    Post-constructor.
virtual void makeDeepCopyFromShallowCopy (CopiesMap &copies)
    Transforms a shallow copy into a deep copy.
Static Public Member Functions
static string _classname_ ()
static OptionList & _getOptionList_ ()
static RemoteMethodMap & _getRemoteMethodMap_ ()
static Object * _new_instance_for_typemap_ ()
static bool _isa_ (const Object *o)
static void _static_initialize_ ()
static const PPath & declaringFile ()
Public Attributes
int n_trees
    Number of trees in the woods.
int tree_depth
    Depth of the trees in the woods (1 gives the ordinary RBMBinomialLayer).
bool use_signed_samples
    Indication that samples should be in {-1,1}, not {0,1}, at nodes where a left/right decision is made.
Static Public Attributes
static StaticInitializer _static_initializer_
Static Protected Member Functions
static void declareOptions (OptionList &ol)
    Declares the class options.
Protected Attributes
Vec off_expectation
Mat off_expectations
Vec local_node_expectation
Mat local_node_expectations
Vec on_free_energy
Mat on_free_energies
Vec off_free_energy
Mat off_free_energies
Vec local_node_expectation_gradient
Vec on_tree_gradient
Vec off_tree_gradient
Vec on_free_energy_gradient
Vec off_free_energy_gradient
Vec tree_free_energies
Vec tree_energies
Vec unit_activations_pos_gradient
Vec unit_activations_neg_gradient
TVec< bool > unit_activations_pos_gradient_init
TVec< bool > unit_activations_neg_gradient_init
Private Types
typedef RBMLayer inherited
Private Member Functions
void build_ ()
    This does the actual building.
RBM layer with tree-structured groups of units.
Definition at line 52 of file RBMWoodsLayer.h.
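The layer's units are organized as n_trees full binary trees of depth tree_depth, stored one tree after another, so the total unit count is size = n_trees * (2^tree_depth - 1); this is exactly the relation build_() computes below. A minimal standalone sketch of that relation (woods_layer_size is a hypothetical helper, not part of PLearn):

#include <cstdio>

// Hypothetical helper mirroring the size formula in RBMWoodsLayer::build_():
// each tree is a full binary tree with 2^tree_depth - 1 nodes.
int woods_layer_size(int n_trees, int tree_depth)
{
    int n_nodes_per_tree = (1 << tree_depth) - 1;
    return n_trees * n_nodes_per_tree;
}

int main()
{
    // Default options: n_trees = 10, tree_depth = 3 -> 10 * 7 = 70 units.
    std::printf("%d\n", woods_layer_size(10, 3));
    return 0;
}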
typedef RBMLayer PLearn::RBMWoodsLayer::inherited [private]
Reimplemented from PLearn::RBMLayer.
Definition at line 54 of file RBMWoodsLayer.h.
PLearn::RBMWoodsLayer::RBMWoodsLayer(real the_learning_rate = 0.)
Default constructor.
Definition at line 53 of file RBMWoodsLayer.cc.
    : inherited( the_learning_rate ),
      n_trees( 10 ),
      tree_depth( 3 ),
      use_signed_samples( false )
{
}
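A hedged usage sketch (the include path and the PP<> smart-pointer workflow follow the usual PLearn conventions, but may differ in your checkout): build options are set after construction, and build() must be called so that build_() can derive size from them.

#include <plearn_learners/online/RBMWoodsLayer.h>

using namespace PLearn;

void make_layer_example()
{
    PP<RBMWoodsLayer> layer = new RBMWoodsLayer( 0.01 ); // learning rate
    layer->n_trees    = 4;
    layer->tree_depth = 3;  // size becomes 4 * (2^3 - 1) = 28
    layer->build();         // derives size and resizes the internal buffers
}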
string PLearn::RBMWoodsLayer::_classname_() [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 51 of file RBMWoodsLayer.cc.
OptionList & PLearn::RBMWoodsLayer::_getOptionList_() [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 51 of file RBMWoodsLayer.cc.
RemoteMethodMap & PLearn::RBMWoodsLayer::_getRemoteMethodMap_() [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 51 of file RBMWoodsLayer.cc.
bool PLearn::RBMWoodsLayer::_isa_(const Object *o) [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 51 of file RBMWoodsLayer.cc.
Object * PLearn::RBMWoodsLayer::_new_instance_for_typemap_() [static]
Reimplemented from PLearn::Object.
Definition at line 51 of file RBMWoodsLayer.cc.
void PLearn::RBMWoodsLayer::_static_initialize_() [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 51 of file RBMWoodsLayer.cc.
void PLearn::RBMWoodsLayer::bpropNLL(const Vec & target, real nll, Vec & bias_gradient) [virtual]
Computes the gradient of the negative log-likelihood of target with respect to the layer's bias, given the internal activations.
Reimplemented from PLearn::RBMLayer.
Definition at line 1153 of file RBMWoodsLayer.cc.
References computeExpectation(), PLearn::RBMLayer::expectation, PLearn::OnlineLearningModule::input_size, PLASSERT, PLERROR, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), PLearn::RBMLayer::size, and PLearn::substract().
{
    PLERROR( "RBMWoodsLayer::bpropNLL(): not implemented yet" );
    computeExpectation();

    PLASSERT( target.size() == input_size );
    bias_gradient.resize( size );

    // bias_gradient = expectation - target
    substract(expectation, target, bias_gradient);
}
void PLearn::RBMWoodsLayer::bpropNLL(const Mat & targets, const Mat & costs_column, Mat & bias_gradients) [virtual]
Reimplemented from PLearn::RBMLayer.
Definition at line 1165 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::batch_size, computeExpectations(), PLearn::RBMLayer::expectations, PLearn::OnlineLearningModule::input_size, PLearn::TMat< T >::length(), PLASSERT, PLERROR, PLearn::TMat< T >::resize(), PLearn::RBMLayer::size, PLearn::substract(), and PLearn::TMat< T >::width().
{
    PLERROR( "RBMWoodsLayer::bpropNLL(): not implemented yet" );
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );
    bias_gradients.resize( batch_size, size );

    // bias_gradients = expectations - targets
    substract(expectations, targets, bias_gradients);
}
void PLearn::RBMWoodsLayer::bpropUpdate(const Mat & inputs, const Mat & outputs, Mat & input_gradients, const Mat & output_gradients, bool accumulate = false) [virtual]
Back-propagate the output gradient to the input, and update parameters.
Implements PLearn::RBMLayer.
Definition at line 980 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::applyBiasDecay(), PLearn::RBMLayer::bias, PLearn::RBMLayer::bias_inc, PLearn::TMat< T >::clear(), i, j, PLearn::RBMLayer::learning_rate, PLearn::TMat< T >::length(), PLearn::RBMLayer::momentum, PLASSERT, PLASSERT_MSG, PLERROR, PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), PLearn::RBMLayer::size, and PLearn::TMat< T >::width().
{
    PLASSERT( inputs.width() == size );
    PLASSERT( outputs.width() == size );
    PLASSERT( output_gradients.width() == size );

    int mbatch_size = inputs.length();
    PLASSERT( outputs.length() == mbatch_size );
    PLASSERT( output_gradients.length() == mbatch_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == size &&
                input_gradients.length() == mbatch_size,
                "Cannot resize input_gradients and accumulate into it" );
    }
    else
    {
        input_gradients.resize(mbatch_size, size);
        input_gradients.clear();
    }

    PLERROR( "RBMWoodsLayer::bpropUpdate(): not implemented yet" );

    if( momentum != 0. )
        bias_inc.resize( size );

    // TODO Can we do this more efficiently? (using BLAS)
    // We use the average gradient over the mini-batch.
    real avg_lr = learning_rate / inputs.length();

    for (int j = 0; j < mbatch_size; j++)
    {
        for( int i=0 ; i<size ; i++ )
        {
            real output_i = outputs(j, i);
            real in_grad_i = output_i * (1-output_i) * output_gradients(j, i);
            input_gradients(j, i) += in_grad_i;

            if( momentum == 0. )
            {
                // update the bias: bias -= learning_rate * input_gradient
                bias[i] -= avg_lr * in_grad_i;
            }
            else
            {
                PLERROR("In RBMWoodsLayer:bpropUpdate - Not implemented for "
                        "momentum with mini-batches");
                // The update rule becomes:
                // bias_inc = momentum * bias_inc - learning_rate * input_gradient
                // bias += bias_inc
                bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
                bias[i] += bias_inc[i];
            }
        }
    }

    applyBiasDecay();
}
void PLearn::RBMWoodsLayer::bpropUpdate(const Vec & input, const Vec & rbm_bias, const Vec & output, Vec & input_gradient, Vec & rbm_bias_gradient, const Vec & output_gradient) [virtual]
back-propagates the output gradient to the input and the bias
TODO: add "accumulate" here.
Reimplemented from PLearn::RBMLayer.
Definition at line 1046 of file RBMWoodsLayer.cc.
References i, PLASSERT, PLERROR, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), and PLearn::RBMLayer::size.
{
    PLASSERT( input.size() == size );
    PLASSERT( rbm_bias.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );
    input_gradient.resize( size );
    rbm_bias_gradient.resize( size );

    PLERROR( "RBMWoodsLayer::bpropUpdate(): not implemented yet" );

    for( int i=0 ; i<size ; i++ )
    {
        real output_i = output[i];
        input_gradient[i] = output_i * (1-output_i) * output_gradient[i];
    }

    rbm_bias_gradient << input_gradient;
}
void PLearn::RBMWoodsLayer::bpropUpdate(const Vec & input, const Vec & output, Vec & input_gradient, const Vec & output_gradient, bool accumulate = false) [virtual]
back-propagates the output gradient to the input
Implements PLearn::RBMLayer.
Definition at line 717 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::applyBiasDecay(), PLearn::RBMLayer::bias, PLearn::RBMLayer::bias_inc, PLearn::TVec< T >::clear(), i, PLearn::RBMLayer::learning_rate, local_node_expectation, local_node_expectation_gradient, PLearn::RBMLayer::momentum, n, n_trees, off_expectation, off_free_energy_gradient, off_tree_gradient, on_free_energy_gradient, on_tree_gradient, PLASSERT, PLASSERT_MSG, PLearn::TVec< T >::resize(), PLearn::RBMLayer::size, PLearn::TVec< T >::size(), tree_depth, and use_signed_samples.
{ PLASSERT( input.size() == size ); PLASSERT( output.size() == size ); PLASSERT( output_gradient.size() == size ); if( accumulate ) { PLASSERT_MSG( input_gradient.size() == size, "Cannot resize input_gradient AND accumulate into it" ); } else { input_gradient.resize( size ); input_gradient.clear(); } // Compute gradient on marginal expectations int n_nodes_per_tree = size / n_trees; int node, depth, sub_tree_size, grand_parent; int offset = 0; bool left_of_grand_parent; real grand_parent_prob; real node_exp, parent_exp, out_grad, off_grad; local_node_expectation_gradient.clear(); on_tree_gradient.clear(); off_tree_gradient.clear(); for( int t=0; t<n_trees; t++ ) { // Set other nodes, level-wise depth = tree_depth-1; sub_tree_size = 0; while( depth > 1 ) { // Left child left_of_grand_parent = true; for( int n=sub_tree_size; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 ) { out_grad = output_gradient[ n + offset ] + on_tree_gradient[ n + offset ] ; off_grad = off_tree_gradient[ n + offset ] ; node_exp = local_node_expectation[ n + offset ]; parent_exp = local_node_expectation[ n + offset + sub_tree_size + 1 ]; if( left_of_grand_parent ) { grand_parent = n + offset + 3*sub_tree_size + 3; if( use_signed_samples ) grand_parent_prob = output[ grand_parent ] + off_expectation[grand_parent]; else grand_parent_prob = output[ grand_parent ]; // Gradient for rest of the tree on_tree_gradient[ grand_parent ] += ( out_grad * node_exp + off_grad * (1 - node_exp) ) * parent_exp; left_of_grand_parent = false; } else { grand_parent = n + offset - sub_tree_size - 1; grand_parent_prob = off_expectation[ grand_parent ]; // Gradient for rest of the tree off_tree_gradient[ grand_parent ] += ( out_grad * node_exp + off_grad * (1 - node_exp) ) * parent_exp; left_of_grand_parent = true; } // Gradient w/r current node local_node_expectation_gradient[ n + offset ] += ( out_grad - off_grad ) * parent_exp * grand_parent_prob; //* node_exp * ( 1 - node_exp ); // Gradient w/r parent node local_node_expectation_gradient[ n + offset + sub_tree_size + 1 ] += ( out_grad * node_exp + off_grad * (1 - node_exp) ) * grand_parent_prob; //* parent_exp * (1-parent_exp) ; } // Right child left_of_grand_parent = true; for( int n=3*sub_tree_size+2; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 ) { out_grad = output_gradient[ n + offset ] + on_tree_gradient[ n + offset ] ; off_grad = off_tree_gradient[ n + offset ] ; node_exp = local_node_expectation[ n + offset ]; parent_exp = local_node_expectation[ n + offset - sub_tree_size - 1 ]; if( left_of_grand_parent ) { grand_parent = n + offset + sub_tree_size + 1; if( use_signed_samples ) grand_parent_prob = output[ grand_parent ] + off_expectation[ grand_parent ]; else grand_parent_prob = output[ grand_parent ]; // Gradient for rest of the tree on_tree_gradient[ grand_parent ] += ( out_grad * node_exp + off_grad * (1 - node_exp) ) * ( 1 - parent_exp ); left_of_grand_parent = false; } else { grand_parent = n + offset - 3*sub_tree_size - 3; grand_parent_prob = off_expectation[ grand_parent ]; // Gradient for rest of the tree off_tree_gradient[ grand_parent ] += ( out_grad * node_exp + off_grad * (1 - node_exp) ) * ( 1 - parent_exp ); left_of_grand_parent = true; } // Gradient w/r current node local_node_expectation_gradient[ n + offset ] += ( out_grad - off_grad ) * ( 1 - parent_exp ) * grand_parent_prob; //* node_exp * ( 1 - node_exp ); // Gradient w/r parent node local_node_expectation_gradient[ n + offset - sub_tree_size - 1 ] -= ( out_grad * node_exp + off_grad * (1 - node_exp) ) * 
grand_parent_prob; //* parent_exp * (1-parent_exp) ; } sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1; depth--; } depth = 1; node = sub_tree_size; out_grad = output_gradient[ node + offset ] + on_tree_gradient[ node + offset ] ; off_grad = off_tree_gradient[ node + offset ] ; node_exp = local_node_expectation[ node + offset ]; parent_exp = local_node_expectation[ node + offset + sub_tree_size + 1 ]; // Gradient w/r current node local_node_expectation_gradient[ node + offset ] += ( out_grad - off_grad ) * parent_exp; //* node_exp * ( 1 - node_exp ); // Gradient w/r parent node local_node_expectation_gradient[ node + offset + sub_tree_size + 1 ] += ( out_grad * node_exp + off_grad * (1 - node_exp) ); //* parent_exp * (1-parent_exp) ; node = 3*sub_tree_size+2; out_grad = output_gradient[ node + offset ] + on_tree_gradient[ node + offset ] ; off_grad = off_tree_gradient[ node + offset ] ; node_exp = local_node_expectation[ node + offset ]; parent_exp = local_node_expectation[ node + offset - sub_tree_size - 1 ]; // Gradient w/r current node local_node_expectation_gradient[ node + offset ] += ( out_grad - off_grad ) * ( 1 - parent_exp ) ; //* node_exp * ( 1 - node_exp ); // Gradient w/r parent node local_node_expectation_gradient[ node + offset - sub_tree_size - 1 ] -= ( out_grad * node_exp + off_grad * (1 - node_exp) ) ; //* parent_exp * (1-parent_exp) ; node = n_nodes_per_tree / 2; sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1; out_grad = output_gradient[ node + offset ] + on_tree_gradient[ node + offset ] ; off_grad = off_tree_gradient[ node + offset ] ; node_exp = local_node_expectation[ node + offset ]; local_node_expectation_gradient[ node + offset ] += ( out_grad - off_grad );// * node_exp * ( 1 - node_exp ); offset += n_nodes_per_tree; } for( int i=0 ; i<size ; i++ ) { node_exp = local_node_expectation[i]; out_grad = local_node_expectation_gradient[i]; on_free_energy_gradient[i] = out_grad * node_exp * ( 1 - node_exp ); off_free_energy_gradient[i] = -out_grad * node_exp * ( 1 - node_exp ); } offset = 0; for( int t=0; t<n_trees; t++ ) { depth = 0; sub_tree_size = n_nodes_per_tree / 2; while( depth < tree_depth-1 ) { for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 ) { out_grad = on_free_energy_gradient[ n + offset ]; node_exp = local_node_expectation[n + offset - (sub_tree_size/2+1)]; input_gradient[n+offset] += out_grad; on_free_energy_gradient[n + offset - (sub_tree_size/2+1)] += out_grad * node_exp; off_free_energy_gradient[n + offset - (sub_tree_size/2+1)] += out_grad * (1 - node_exp); out_grad = off_free_energy_gradient[ n + offset ]; node_exp = local_node_expectation[n + offset + (sub_tree_size/2+1)]; if( use_signed_samples ) input_gradient[n+offset] -= out_grad; on_free_energy_gradient[n + offset + (sub_tree_size/2+1)] += out_grad * node_exp; off_free_energy_gradient[n + offset + (sub_tree_size/2+1)] += out_grad * (1 - node_exp); } sub_tree_size /= 2; depth++; } depth = tree_depth-1; sub_tree_size = 0; for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 ) { input_gradient[n+offset] += on_free_energy_gradient[ n + offset ]; if( use_signed_samples ) input_gradient[n+offset] -= off_free_energy_gradient[ n + offset ]; } offset += n_nodes_per_tree; } if( momentum != 0. ) bias_inc.resize( size ); for( int i=0 ; i<size ; i++ ) { real in_grad_i = input_gradient[i]; if( momentum == 0. 
) { // update the bias: bias -= learning_rate * input_gradient bias[i] -= learning_rate * in_grad_i; } else { // The update rule becomes: // bias_inc = momentum * bias_inc - learning_rate * input_gradient // bias += bias_inc bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i; bias[i] += bias_inc[i]; } } applyBiasDecay(); }
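A hedged calling sketch for the single-example overload above (backprop_step is an illustrative name, not a PLearn function): bpropUpdate() expects the same input/output pair that fprop() produced, fills input_gradient, and updates the layer's bias as a side effect.

#include <plearn_learners/online/RBMWoodsLayer.h>

using namespace PLearn;

// Illustrative wrapper: one forward/backward round trip on one example.
void backprop_step( PP<RBMWoodsLayer> layer,
                    const Vec& input, const Vec& output_gradient )
{
    Vec output, input_gradient;
    layer->fprop( input, output );      // marginal expectations of all units
    layer->bpropUpdate( input, output, input_gradient, output_gradient,
                        false );        // accumulate = false: overwrite
}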
void PLearn::RBMWoodsLayer::build() [virtual]
Post-constructor.
The normal implementation should call simply inherited::build(), then this class's build_(). This method should be callable again at later times, after modifying some option fields to change the "architecture" of the object.
Reimplemented from PLearn::RBMLayer.
Definition at line 1226 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::build(), and build_().
{
    inherited::build();
    build_();
}
void PLearn::RBMWoodsLayer::build_() [private]
This does the actual building.
Reimplemented from PLearn::RBMLayer.
Definition at line 1202 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::build(), PLearn::ipow(), local_node_expectation, local_node_expectation_gradient, n_trees, off_expectation, off_free_energy, off_free_energy_gradient, off_tree_gradient, on_free_energy, on_free_energy_gradient, on_tree_gradient, PLASSERT, PLERROR, PLearn::TVec< T >::resize(), PLearn::RBMLayer::size, and tree_depth.
Referenced by build().
{
    PLASSERT( n_trees > 0 );
    PLASSERT( tree_depth > 0 );

    if ( tree_depth < 2 )
        PLERROR("RBMWoodsLayer::build_(): tree_depth < 2 not supported, use "
                "RBMBinomialLayer instead.");

    size = n_trees * ( ipow( 2, tree_depth ) - 1 );

    local_node_expectation.resize( size );
    on_free_energy.resize( size );
    off_free_energy.resize( size );
    off_expectation.resize( size );
    local_node_expectation_gradient.resize( size );
    on_tree_gradient.resize( size );
    off_tree_gradient.resize( size );
    on_free_energy_gradient.resize( size );
    off_free_energy_gradient.resize( size );

    // Must call parent's build, since size was just set
    inherited::build();
}
string PLearn::RBMWoodsLayer::classname() const [virtual]
Reimplemented from PLearn::Object.
Definition at line 51 of file RBMWoodsLayer.cc.
void PLearn::RBMWoodsLayer::computeExpectation() [virtual]
Compute marginal expectations of all units.
Implements PLearn::RBMLayer.
Definition at line 179 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::activation, PLearn::RBMLayer::expectation, PLearn::RBMLayer::expectation_is_up_to_date, i, PLearn::TVec< T >::length(), local_node_expectation, PLearn::logadd(), n, n_trees, off_expectation, off_free_energy, on_free_energy, PLearn::safeexp(), PLearn::RBMLayer::size, tree_depth, and use_signed_samples.
Referenced by bpropNLL(), and computeProbabilisticClustering().
{ if( expectation_is_up_to_date ) return; int n_nodes_per_tree = size / n_trees; int node, depth, sub_tree_size, grand_parent; int offset = 0; bool left_of_grand_parent; real grand_parent_prob; // Get local expectations at every node // HUGO: Note that local_node_expectation is really // used as a probability, even for signed samples. // Sorry for the misleading choice of variable name... // Divide and conquer computation of local (conditional) free energies for( int t=0; t<n_trees; t++ ) { depth = tree_depth-1; sub_tree_size = 0; // Initialize last level for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 ) { //on_free_energy[ n + offset ] = safeexp(activation[n+offset]); //off_free_energy[ n + offset ] = 1; // Now working in log-domain on_free_energy[ n + offset ] = activation[n+offset]; if( use_signed_samples ) off_free_energy[ n + offset ] = -activation[n+offset]; else off_free_energy[ n + offset ] = 0; } depth = tree_depth-2; sub_tree_size = 1; while( depth >= 0 ) { for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 ) { //on_free_energy[ n + offset ] = safeexp(activation[n+offset]) * // ( on_free_energy[n + offset - sub_tree_size] + off_free_energy[n + offset - sub_tree_size] ) ; //off_free_energy[ n + offset ] = // ( on_free_energy[n + offset + sub_tree_size] + off_free_energy[n + offset + sub_tree_size] ) ; // Now working in log-domain on_free_energy[ n + offset ] = activation[n+offset] + logadd( on_free_energy[n + offset - (sub_tree_size/2+1)], off_free_energy[n + offset - (sub_tree_size/2+1)] ) ; if( use_signed_samples ) off_free_energy[ n + offset ] = -activation[n+offset] + logadd( on_free_energy[n + offset + (sub_tree_size/2+1)], off_free_energy[n + offset + (sub_tree_size/2+1)] ) ; else off_free_energy[ n + offset ] = logadd( on_free_energy[n + offset + (sub_tree_size/2+1)], off_free_energy[n + offset + (sub_tree_size/2+1)] ) ; } sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1; depth--; } offset += n_nodes_per_tree; } for( int i=0 ; i<size ; i++ ) //local_node_expectation[i] = on_free_energy[i] / ( on_free_energy[i] + off_free_energy[i] ); // Now working in log-domain local_node_expectation[i] = safeexp(on_free_energy[i] - logadd(on_free_energy[i], off_free_energy[i])); // Compute marginal expectations offset = 0; for( int t=0; t<n_trees; t++ ) { // Initialize root node = n_nodes_per_tree / 2; expectation[ node + offset ] = local_node_expectation[ node + offset ]; off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ]); sub_tree_size = node; // First level nodes depth = 1; sub_tree_size /= 2; // Left child node = sub_tree_size; expectation[ node + offset ] = local_node_expectation[ node + offset ] * local_node_expectation[ node + offset + sub_tree_size + 1 ]; off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ]) * local_node_expectation[ node + offset + sub_tree_size + 1 ]; // Right child node = 3*sub_tree_size+2; expectation[ node + offset ] = local_node_expectation[ node + offset ] * (1 - local_node_expectation[ node + offset - sub_tree_size - 1 ]); off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ]) * (1 - local_node_expectation[ node + offset - sub_tree_size - 1 ]); // Set other nodes, level-wise depth = 2; sub_tree_size /= 2; while( depth < tree_depth ) { // Left child left_of_grand_parent = true; for( int n=sub_tree_size; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 ) { if( left_of_grand_parent ) { grand_parent = n + offset + 3*sub_tree_size + 3; 
grand_parent_prob = expectation[ grand_parent ]; left_of_grand_parent = false; } else { grand_parent = n + offset - sub_tree_size - 1; grand_parent_prob = off_expectation[ grand_parent ]; left_of_grand_parent = true; } expectation[ n + offset ] = local_node_expectation[ n + offset ] * local_node_expectation[ n + offset + sub_tree_size + 1 ] * grand_parent_prob; off_expectation[ n + offset ] = (1 - local_node_expectation[ n + offset ]) * local_node_expectation[ n + offset + sub_tree_size + 1 ] * grand_parent_prob; } // Right child left_of_grand_parent = true; for( int n=3*sub_tree_size+2; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 ) { if( left_of_grand_parent ) { grand_parent = n + offset + sub_tree_size + 1; grand_parent_prob = expectation[ grand_parent ]; left_of_grand_parent = false; } else { grand_parent = n + offset - 3*sub_tree_size - 3; grand_parent_prob = off_expectation[ grand_parent ]; left_of_grand_parent = true; } expectation[ n + offset ] = local_node_expectation[ n + offset ] * (1 - local_node_expectation[ n + offset - sub_tree_size - 1 ]) * grand_parent_prob; off_expectation[ n + offset ] = (1 - local_node_expectation[ n + offset ]) * (1 - local_node_expectation[ n + offset - sub_tree_size - 1 ]) * grand_parent_prob; } sub_tree_size /= 2; depth++; } offset += n_nodes_per_tree; } if( use_signed_samples ) for( int i=0; i<expectation.length(); i++ ) expectation[i] = expectation[i] - off_expectation[i]; expectation_is_up_to_date = true; }
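The commented-out lines above show the original ratio form, on / (on + off); the implementation switched to the log domain, where the local "on" probability becomes exp(on_fe - logadd(on_fe, off_fe)) and large activations cannot overflow. A standalone sketch of the trick, with plain-C++ stand-ins for PLearn::logadd and PLearn::safeexp:

#include <cmath>
#include <cstdio>

// Stable log(exp(a) + exp(b)): factor out the max before exponentiating.
double logadd(double a, double b)
{
    double m = a > b ? a : b;
    return m + std::log( std::exp(a - m) + std::exp(b - m) );
}

int main()
{
    double on_fe = 800.0, off_fe = 798.0;  // exp() of these would overflow
    double p_on = std::exp( on_fe - logadd(on_fe, off_fe) );
    std::printf("%f\n", p_on);             // ~0.880797, i.e. sigmoid(2)
    return 0;
}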
void PLearn::RBMWoodsLayer::computeExpectations() [virtual]
Compute marginal mini-batch expectations of all units.
Implements PLearn::RBMLayer.
Definition at line 351 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::activations, b, PLearn::RBMLayer::batch_size, PLearn::RBMLayer::expectation, PLearn::RBMLayer::expectations, PLearn::RBMLayer::expectations_are_up_to_date, i, PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), local_node_expectations, PLearn::logadd(), n, n_trees, off_expectations, off_free_energies, on_free_energies, PLASSERT, PLearn::TMat< T >::resize(), PLearn::safeexp(), PLearn::RBMLayer::size, tree_depth, use_signed_samples, and PLearn::TMat< T >::width().
Referenced by bpropNLL().
{ if( expectations_are_up_to_date ) return; PLASSERT( expectations.width() == size && expectations.length() == batch_size ); off_expectations.resize(batch_size,size); local_node_expectations.resize(batch_size,size); on_free_energies.resize(batch_size,size); off_free_energies.resize(batch_size,size); int n_nodes_per_tree = size / n_trees; int node, depth, sub_tree_size, grand_parent; int offset = 0; bool left_of_grand_parent; real grand_parent_prob; for( int b=0; b<batch_size; b++ ) { offset=0; // Get local expectations at every node // HUGO: Note that local_node_expectations is really // used as a probability, even for signed samples. // Sorry for the misleading choice of variable name... // Divide and conquer computation of local (conditional) free energies for( int t=0; t<n_trees; t++ ) { depth = tree_depth-1; sub_tree_size = 0; // Initialize last level for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 ) { //on_free_energies(b, n + offset ) = safeexp(activations(b,n+offset)); //off_free_energies(b, n + offset ) = 1; // Now working in log-domain on_free_energies(b, n + offset ) = activations(b,n+offset); if( use_signed_samples ) off_free_energies(b, n + offset ) = -activations(b,n+offset); else off_free_energies(b, n + offset ) = 0; } depth = tree_depth-2; sub_tree_size = 1; while( depth >= 0 ) { for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 ) { //on_free_energies(b, n + offset ) = safeexp(activations(b,n+offset)) * // ( on_free_energies(b,n + offset - sub_tree_size) + off_free_energies(b,n + offset - sub_tree_size) ) ; //off_free_energies(b, n + offset ) = // ( on_free_energies(b,n + offset + sub_tree_size) + off_free_energies(b,n + offset + sub_tree_size) ) ; // Now working in log-domain on_free_energies(b, n + offset ) = activations(b,n+offset) + logadd( on_free_energies(b,n + offset - (sub_tree_size/2+1)), off_free_energies(b,n + offset - (sub_tree_size/2+1)) ) ; if( use_signed_samples ) off_free_energies(b, n + offset ) = -activations(b,n+offset) + logadd( on_free_energies(b,n + offset + (sub_tree_size/2+1)), off_free_energies(b,n + offset + (sub_tree_size/2+1)) ) ; else off_free_energies(b, n + offset ) = logadd( on_free_energies(b,n + offset + (sub_tree_size/2+1)), off_free_energies(b,n + offset + (sub_tree_size/2+1)) ) ; } sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1; depth--; } offset += n_nodes_per_tree; } for( int i=0 ; i<size ; i++ ) //local_node_expectations(b,i) = on_free_energies(b,i) / ( on_free_energies(b,i) + off_free_energies(b,i) ); // Now working in log-domain local_node_expectations(b,i) = safeexp(on_free_energies(b,i) - logadd(on_free_energies(b,i), off_free_energies(b,i))); // Compute marginal expectations offset = 0; for( int t=0; t<n_trees; t++ ) { // Initialize root node = n_nodes_per_tree / 2; expectations(b, node + offset ) = local_node_expectations(b, node + offset ); off_expectations(b, node + offset ) = (1 - local_node_expectations(b, node + offset )); sub_tree_size = node; // First level nodes depth = 1; sub_tree_size /= 2; // Left child node = sub_tree_size; expectations(b, node + offset ) = local_node_expectations(b, node + offset ) * local_node_expectations(b, node + offset + sub_tree_size + 1 ); off_expectations(b, node + offset ) = (1 - local_node_expectations(b, node + offset )) * local_node_expectations(b, node + offset + sub_tree_size + 1 ); // Right child node = 3*sub_tree_size+2; expectations(b, node + offset ) = local_node_expectations(b, node + offset ) * (1 - local_node_expectations(b, node + 
offset - sub_tree_size - 1 )); off_expectations(b, node + offset ) = (1 - local_node_expectations(b, node + offset )) * (1 - local_node_expectations(b, node + offset - sub_tree_size - 1 )); // Set other nodes, level-wise depth = 2; sub_tree_size /= 2; while( depth < tree_depth ) { // Left child left_of_grand_parent = true; for( int n=sub_tree_size; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 ) { if( left_of_grand_parent ) { grand_parent = n + offset + 3*sub_tree_size + 3; grand_parent_prob = expectations(b, grand_parent ); left_of_grand_parent = false; } else { grand_parent = n + offset - sub_tree_size - 1; grand_parent_prob = off_expectations(b, grand_parent ); left_of_grand_parent = true; } expectations(b, n + offset ) = local_node_expectations(b, n + offset ) * local_node_expectations(b, n + offset + sub_tree_size + 1 ) * grand_parent_prob; off_expectations(b, n + offset ) = (1 - local_node_expectations(b, n + offset )) * local_node_expectations(b, n + offset + sub_tree_size + 1 ) * grand_parent_prob; } // Right child left_of_grand_parent = true; for( int n=3*sub_tree_size+2; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 ) { if( left_of_grand_parent ) { grand_parent = n + offset + sub_tree_size + 1; grand_parent_prob = expectations(b, grand_parent ); left_of_grand_parent = false; } else { grand_parent = n + offset - 3*sub_tree_size - 3; grand_parent_prob = off_expectations(b, grand_parent ); left_of_grand_parent = true; } expectations(b, n + offset ) = local_node_expectations(b, n + offset ) * (1 - local_node_expectations(b, n + offset - sub_tree_size - 1 )) * grand_parent_prob; off_expectations(b, n + offset ) = (1 - local_node_expectations(b, n + offset )) * (1 - local_node_expectations(b, n + offset - sub_tree_size - 1 )) * grand_parent_prob; } sub_tree_size /= 2; depth++; } offset += n_nodes_per_tree; } } if( use_signed_samples ) for( int b=0; b<batch_size; b++ ) for( int i=0; i<expectation.length(); i++ ) expectations(b,i) = expectations(b,i) - off_expectations(b,i); expectations_are_up_to_date = true; }
void PLearn::RBMWoodsLayer::computeProbabilisticClustering(Vec & prob_clusters) [virtual]
Definition at line 160 of file RBMWoodsLayer.cc.
References computeExpectation(), PLearn::RBMLayer::expectation, i, n_trees, off_expectation, PLearn::TVec< T >::resize(), and PLearn::RBMLayer::size.
{
    computeExpectation();
    int offset = 0;
    int n_nodes_per_tree = size / n_trees;
    prob_clusters.resize(n_trees*(n_nodes_per_tree+1));
    for( int t=0; t<n_trees; t++ )
    {
        for( int i=0; i<n_nodes_per_tree; i = i+2)
            prob_clusters[i+offset+t] = expectation[i+offset];
        for( int i=0; i<n_nodes_per_tree; i = i+2)
            prob_clusters[i+1+offset+t] = off_expectation[i+offset];
        offset += n_nodes_per_tree;
    }
}
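The leaves of each tree sit at the even within-tree indices, and each leaf can be on or off, so every tree defines 2^tree_depth mutually exclusive "clusters"; prob_clusters therefore holds n_trees contiguous blocks of n_nodes_per_tree + 1 probabilities, one block per tree. A hedged reading sketch (layer is assumed to be a built RBMWoodsLayer whose activations have been set):

Vec prob_clusters;
layer->computeProbabilisticClustering( prob_clusters );

int n_clusters_per_tree = prob_clusters.length() / layer->n_trees;
for( int t = 0; t < layer->n_trees; t++ )
{
    // Cluster probabilities of tree t; each block should sum to ~1.
    Vec tree_probs = prob_clusters.subVec( t * n_clusters_per_tree,
                                           n_clusters_per_tree );
}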
void PLearn::RBMWoodsLayer::declareOptions(OptionList & ol) [static, protected]
Declares the class options.
Reimplemented from PLearn::RBMLayer.
Definition at line 1182 of file RBMWoodsLayer.cc.
References PLearn::OptionBase::buildoption, PLearn::declareOption(), PLearn::RBMLayer::declareOptions(), n_trees, tree_depth, and use_signed_samples.
{
    declareOption(ol, "n_trees", &RBMWoodsLayer::n_trees,
                  OptionBase::buildoption,
                  "Number of trees in the woods.");

    declareOption(ol, "tree_depth", &RBMWoodsLayer::tree_depth,
                  OptionBase::buildoption,
                  "Depth of the trees in the woods (1 gives the ordinary "
                  "RBMBinomialLayer).");

    declareOption(ol, "use_signed_samples", &RBMWoodsLayer::use_signed_samples,
                  OptionBase::buildoption,
                  "Indication that samples should be in {-1,1}, not {0,1}, at nodes where a\n"
                  "left/right decision is made. Other nodes are set to 0.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
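Since the three options are declared as build options, they can also be set through the generic string interface inherited from Object; a hedged sketch, assuming the usual setOption()/build() workflow:

PP<RBMWoodsLayer> layer = new RBMWoodsLayer();
layer->setOption( "n_trees", "10" );
layer->setOption( "tree_depth", "3" );
layer->setOption( "use_signed_samples", "0" );
layer->build();   // build_() derives size from the options just set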
static const PPath & PLearn::RBMWoodsLayer::declaringFile() [inline, static]
Reimplemented from PLearn::RBMLayer.
Definition at line 151 of file RBMWoodsLayer.h.
RBMWoodsLayer * PLearn::RBMWoodsLayer::deepCopy(CopiesMap & copies) const [virtual]
Reimplemented from PLearn::RBMLayer.
Definition at line 51 of file RBMWoodsLayer.cc.
real PLearn::RBMWoodsLayer::energy(const Vec & unit_values) const [virtual]
compute -bias' unit_values
Reimplemented from PLearn::RBMLayer.
Definition at line 1252 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::bias, PLearn::dot(), and PLERROR.
void PLearn::RBMWoodsLayer::fprop(const Mat & inputs, Mat & outputs) [virtual]
Batch forward propagation.
Reimplemented from PLearn::RBMLayer.
Definition at line 695 of file RBMWoodsLayer.cc.
References PLearn::TMat< T >::length(), PLASSERT, PLERROR, PLearn::TMat< T >::resize(), PLearn::RBMLayer::size, and PLearn::TMat< T >::width().
{
    int mbatch_size = inputs.length();
    PLASSERT( inputs.width() == size );
    outputs.resize( mbatch_size, size );

    PLERROR( "RBMWoodsLayer::fprop(): not implemented yet" );
}
void PLearn::RBMWoodsLayer::fprop(const Vec & input, const Vec & rbm_bias, Vec & output) const [virtual]
forward propagation with provided bias
Reimplemented from PLearn::RBMLayer.
Definition at line 704 of file RBMWoodsLayer.cc.
References PLearn::OnlineLearningModule::input_size, PLearn::OnlineLearningModule::output_size, PLASSERT, PLERROR, PLearn::TVec< T >::resize(), and PLearn::TVec< T >::size().
{
    PLASSERT( input.size() == input_size );
    PLASSERT( rbm_bias.size() == input_size );
    output.resize( output_size );

    PLERROR( "RBMWoodsLayer::fprop(): not implemented yet" );
}
void PLearn::RBMWoodsLayer::fprop(const Vec & input, Vec & output) const [virtual]
forward propagation
Reimplemented from PLearn::RBMLayer.
Definition at line 534 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::bias, i, PLearn::OnlineLearningModule::input_size, PLearn::TVec< T >::length(), local_node_expectation, PLearn::logadd(), n, n_trees, off_expectation, off_free_energy, on_free_energy, PLearn::OnlineLearningModule::output_size, PLASSERT, PLearn::TVec< T >::resize(), PLearn::safeexp(), PLearn::RBMLayer::size, PLearn::TVec< T >::size(), tree_depth, and use_signed_samples.
{ PLASSERT( input.size() == input_size ); output.resize( output_size ); int n_nodes_per_tree = size / n_trees; int node, depth, sub_tree_size, grand_parent; int offset = 0; bool left_of_grand_parent; real grand_parent_prob; // Get local expectations at every node // Divide and conquer computation of local (conditional) free energies for( int t=0; t<n_trees; t++ ) { depth = tree_depth-1; sub_tree_size = 0; // Initialize last level for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 ) { //on_free_energy[ n + offset ] = safeexp(input[n+offset] + bias[n+offset]); //off_free_energy[ n + offset ] = 1; // Now working in log-domain on_free_energy[ n + offset ] = input[n+offset] + bias[n+offset]; if( use_signed_samples ) off_free_energy[ n + offset ] = -(input[n+offset] + bias[n+offset]); else off_free_energy[ n + offset ] = 0; } depth = tree_depth-2; sub_tree_size = 1; while( depth >= 0 ) { for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 ) { //on_free_energy[ n + offset ] = safeexp(input[n+offset] + bias[n+offset]) * // ( on_free_energy[n + offset - sub_tree_size] + off_free_energy[n + offset - sub_tree_size] ) ; //off_free_energy[ n + offset ] = // ( on_free_energy[n + offset + sub_tree_size] + off_free_energy[n + offset + sub_tree_size] ) ; // Now working in the log-domain on_free_energy[ n + offset ] = input[n+offset] + bias[n+offset] + logadd( on_free_energy[n + offset - (sub_tree_size/2+1)], off_free_energy[n + offset - (sub_tree_size/2+1)] ) ; if( use_signed_samples ) off_free_energy[ n + offset ] = -(input[n+offset] + bias[n+offset]) + logadd( on_free_energy[n + offset + (sub_tree_size/2+1)], off_free_energy[n + offset + (sub_tree_size/2+1)] ) ; else off_free_energy[ n + offset ] = logadd( on_free_energy[n + offset + (sub_tree_size/2+1)], off_free_energy[n + offset + (sub_tree_size/2+1)] ) ; } sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1; depth--; } offset += n_nodes_per_tree; } for( int i=0 ; i<size ; i++ ) //local_node_expectation[i] = on_free_energy[i] / ( on_free_energy[i] + off_free_energy[i] ); // Now working in log-domain local_node_expectation[i] = safeexp(on_free_energy[i] - logadd(on_free_energy[i], off_free_energy[i])); // Compute marginal expectations offset = 0; for( int t=0; t<n_trees; t++ ) { // Initialize root node = n_nodes_per_tree / 2; output[ node + offset ] = local_node_expectation[ node + offset ]; off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ]); sub_tree_size = node; // First level nodes depth = 1; sub_tree_size /= 2; // Left child node = sub_tree_size; output[ node + offset ] = local_node_expectation[ node + offset ] * local_node_expectation[ node + offset + sub_tree_size + 1 ]; off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ]) * local_node_expectation[ node + offset + sub_tree_size + 1 ]; // Right child node = 3*sub_tree_size+2; output[ node + offset ] = local_node_expectation[ node + offset ] * (1 - local_node_expectation[ node + offset - sub_tree_size - 1 ]); off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ]) * (1 - local_node_expectation[ node + offset - sub_tree_size - 1 ]); // Set other nodes, level-wise depth = 2; sub_tree_size /= 2; while( depth < tree_depth ) { // Left child left_of_grand_parent = true; for( int n=sub_tree_size; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 ) { if( left_of_grand_parent ) { grand_parent = n + offset + 3*sub_tree_size + 3; grand_parent_prob = output[ grand_parent ]; left_of_grand_parent = 
false; } else { grand_parent = n + offset - sub_tree_size - 1; grand_parent_prob = off_expectation[ grand_parent ]; left_of_grand_parent = true; } output[ n + offset ] = local_node_expectation[ n + offset ] * local_node_expectation[ n + offset + sub_tree_size + 1 ] * grand_parent_prob; off_expectation[ n + offset ] = (1 - local_node_expectation[ n + offset ]) * local_node_expectation[ n + offset + sub_tree_size + 1 ] * grand_parent_prob; } // Right child left_of_grand_parent = true; for( int n=3*sub_tree_size+2; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 ) { if( left_of_grand_parent ) { grand_parent = n + offset + sub_tree_size + 1; grand_parent_prob = output[ grand_parent ]; left_of_grand_parent = false; } else { grand_parent = n + offset - 3*sub_tree_size - 3; grand_parent_prob = off_expectation[ grand_parent ]; left_of_grand_parent = true; } output[ n + offset ] = local_node_expectation[ n + offset ] * (1 - local_node_expectation[ n + offset - sub_tree_size - 1 ]) * grand_parent_prob; off_expectation[ n + offset ] = (1 - local_node_expectation[ n + offset ]) * (1 - local_node_expectation[ n + offset - sub_tree_size - 1 ]) * grand_parent_prob; } sub_tree_size /= 2; depth++; } offset += n_nodes_per_tree; } if( use_signed_samples ) for( int i=0; i<output.length(); i++ ) output[i] = output[i] - off_expectation[i]; }
real PLearn::RBMWoodsLayer::fpropNLL(const Vec & target) [virtual]
Computes the negative log-likelihood of target given the internal activations of the layer.
Reimplemented from PLearn::RBMLayer.
Definition at line 1069 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::activation, i, PLearn::OnlineLearningModule::input_size, PLASSERT, PLERROR, PLearn::TVec< T >::size(), PLearn::RBMLayer::size, PLearn::softplus(), PLearn::tabulated_softplus(), and PLearn::OnlineLearningModule::use_fast_approximations.
{
    PLASSERT( target.size() == input_size );
    PLERROR( "RBMWoodsLayer::fpropNLL(): not implemented yet" );

    real ret = 0;
    real target_i, activation_i;
    if(use_fast_approximations){
        for( int i=0 ; i<size ; i++ )
        {
            target_i = target[i];
            activation_i = activation[i];
            ret += tabulated_softplus(activation_i) - target_i * activation_i;
            // nll = - target*log(sigmoid(act)) -(1-target)*log(1-sigmoid(act))
            // but it is numerically unstable, so use instead the following identity:
            //     = target*softplus(-act) +(1-target)*(act+softplus(-act))
            //     = act + softplus(-act) - target*act
            //     = softplus(act) - target*act
        }
    } else {
        for( int i=0 ; i<size ; i++ )
        {
            target_i = target[i];
            activation_i = activation[i];
            ret += softplus(activation_i) - target_i * activation_i;
        }
    }
    return ret;
}
void PLearn::RBMWoodsLayer::fpropNLL(const Mat & targets, const Mat & costs_column) [virtual]
Reimplemented from PLearn::RBMLayer.
Definition at line 1100 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::activation, PLearn::RBMLayer::activations, PLearn::RBMLayer::batch_size, PLearn::fast_exact_is_equal(), i, PLearn::OnlineLearningModule::input_size, PLearn::TMat< T >::length(), PLASSERT, PLERROR, PLearn::RBMLayer::size, PLearn::softplus(), PLearn::tabulated_softplus(), PLearn::OnlineLearningModule::use_fast_approximations, and PLearn::TMat< T >::width().
{
    // computeExpectations(); // why?
    PLERROR( "RBMWoodsLayer::fpropNLL(): not implemented yet" );

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );

    for (int k=0;k<batch_size;k++) // loop over minibatch
    {
        real nll = 0;
        real* activation = activations[k];
        real* target = targets[k];
        if(use_fast_approximations){
            for( int i=0 ; i<size ; i++ ) // loop over outputs
            {
                if(!fast_exact_is_equal(target[i],0.0))
                    // nll -= target[i] * pl_log(expectations[i]);
                    // but it is numerically unstable, so use instead
                    // log (1/(1+exp(-x))) = -log(1+exp(-x)) = -softplus(-x)
                    nll += target[i] * tabulated_softplus(-activation[i]);
                if(!fast_exact_is_equal(target[i],1.0))
                    // nll -= (1-target[i]) * pl_log(1-output[i]);
                    // log (1 - 1/(1+exp(-x))) = log(exp(-x)/(1+exp(-x)))
                    //                         = log(1/(1+exp(x)))
                    //                         = -log(1+exp(x))
                    //                         = -softplus(x)
                    nll += (1-target[i]) * tabulated_softplus(activation[i]);
            }
        } else {
            for( int i=0 ; i<size ; i++ ) // loop over outputs
            {
                if(!fast_exact_is_equal(target[i],0.0))
                    // same identity as above, exact softplus
                    nll += target[i] * softplus(-activation[i]);
                if(!fast_exact_is_equal(target[i],1.0))
                    nll += (1-target[i]) * softplus(activation[i]);
            }
        }
        costs_column(k,0) = nll;
    }
}
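The comments above rely on two standard identities; written out once, with \(\sigma\) the logistic sigmoid and \(\mathrm{softplus}(x) = \log(1 + e^{x})\):

\[
-\log \sigma(a) = \mathrm{softplus}(-a),
\qquad
-\log\bigl(1 - \sigma(a)\bigr) = \mathrm{softplus}(a),
\]

so for a binary target \(t\) the per-unit cost \(t\,\mathrm{softplus}(-a) + (1-t)\,\mathrm{softplus}(a)\) simplifies to \(\mathrm{softplus}(a) - t\,a\), the form used by the single-example fpropNLL() above.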
real PLearn::RBMWoodsLayer::freeEnergyContribution(const Vec & unit_activations) const [virtual]
Computes -log( sum over configurations h of exp(h' unit_activations) ). This quantity is used for computing the free energy of a sample x in the OTHER layer of an RBM, from which unit_activations was computed.
Reimplemented from PLearn::RBMLayer.
Definition at line 1258 of file RBMWoodsLayer.cc.
References d, PLearn::logadd(), n, n_trees, PLASSERT, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), PLearn::RBMLayer::size, tree_depth, tree_energies, tree_free_energies, and use_signed_samples.
Referenced by freeEnergyContributionGradient().
{ PLASSERT( unit_activations.size() == size ); int n_nodes_per_tree = size / n_trees; tree_free_energies.resize(n_trees); tree_energies.resize(n_trees * (n_nodes_per_tree+1) ); int offset=0; int sub_tree_size = n_nodes_per_tree / 2; int sub_root = sub_tree_size; real result = 0; real tree_energy = 0; real tree_free_energy = 0; real leaf_activation = 0; for( int t = 0; t<n_trees; t++ ) { for( int n = 0; n < n_nodes_per_tree; n = n+2 ) // Looking only at leaves { // Computation energy of tree tree_energy = 0; sub_tree_size = n_nodes_per_tree / 2; sub_root = sub_tree_size; for( int d=0; d<tree_depth-1; d++ ) { if( n < sub_root ) { tree_energy -= unit_activations[offset+sub_root]; sub_tree_size /= 2; sub_root -= sub_tree_size + 1; } else { if( use_signed_samples ) tree_energy -= -unit_activations[offset+sub_root]; sub_tree_size /= 2; sub_root += sub_tree_size+1; } } leaf_activation = unit_activations[offset+n]; // Add free energy of tree with activated leaf if( n == 0) tree_free_energy = tree_energy - leaf_activation; else tree_free_energy = -logadd( -tree_energy + leaf_activation, -tree_free_energy ); tree_energies[offset+t+n] = tree_energy - leaf_activation; // Add free_energy of tree with inactivated leaf if( use_signed_samples ) { tree_free_energy = -logadd( -tree_energy - leaf_activation, -tree_free_energy ); tree_energies[offset+t+n+1] = tree_energy + leaf_activation; } else { tree_free_energy = -logadd( -tree_energy, -tree_free_energy ); tree_energies[offset+t+n+1] = tree_energy; } } tree_free_energies[t] = tree_free_energy; result += tree_free_energy; offset += n_nodes_per_tree; } return result; }
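A reconstruction of what the loop above computes, consistent with the code (the trees are independent given unit_activations, so the contribution decomposes over trees):

\[
F(a) \;=\; \sum_{t=1}^{n\_trees} F_t(a),
\qquad
F_t(a) \;=\; -\log \sum_{h_t} \exp\!\left( h_t^\top a_t \right),
\]

where \(a_t\) is the slice of unit_activations belonging to tree \(t\) and \(h_t\) ranges over that tree's \(2^{tree\_depth}\) valid configurations (one on/off decision per level along a root-to-leaf path). The loop caches the per-configuration energies \(-h_t^\top a_t\) in tree_energies and the per-tree free energies \(F_t\) in tree_free_energies, which freeEnergyContributionGradient() reuses.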
void PLearn::RBMWoodsLayer::freeEnergyContributionGradient(const Vec & unit_activations, Vec & unit_activations_gradient, real output_gradient = 1, bool accumulate = false) const [virtual]
Computes gradient of the result of freeEnergyContribution with respect to unit_activations.
Optionally, a gradient with respect to the result of freeEnergyContribution can be given.
Reimplemented from PLearn::RBMLayer.
Definition at line 1327 of file RBMWoodsLayer.cc.
References PLearn::TVec< T >::clear(), d, PLearn::TVec< T >::fill(), freeEnergyContribution(), i, PLearn::logadd(), n, n_trees, PLASSERT, PLearn::TVec< T >::resize(), PLearn::safeexp(), PLearn::TVec< T >::size(), PLearn::RBMLayer::size, tree_depth, tree_energies, tree_free_energies, unit_activations_neg_gradient, unit_activations_neg_gradient_init, unit_activations_pos_gradient, unit_activations_pos_gradient_init, and use_signed_samples.
{ PLASSERT( unit_activations.size() == size ); unit_activations_gradient.resize( size ); if( !accumulate ) unit_activations_gradient.clear(); // This method assumes freeEnergyContribution() has been called before, // with the same unit_activations vector!!! int n_nodes_per_tree = size / n_trees; int offset=0; int sub_tree_size = n_nodes_per_tree / 2; int sub_root = sub_tree_size; real tree_energy = 0; real tree_energy_gradient = 0; real tree_energy_leaf_on_gradient = 0; real tree_energy_leaf_off_gradient = 0; // Fills in the internal variables tree_energies and tree_free_energies. // I have to do this because I can't assume the last time freeEnergyContribution was // called was with the same unit_activations... freeEnergyContribution(unit_activations); unit_activations_neg_gradient.resize(size); unit_activations_neg_gradient_init.resize(size); unit_activations_neg_gradient_init.fill(false); if( use_signed_samples ) { unit_activations_pos_gradient.resize(size); unit_activations_pos_gradient_init.resize(size); unit_activations_pos_gradient_init.fill(false); } for( int t = 0; t<n_trees; t++ ) { for( int n = 0; n < n_nodes_per_tree; n = n+2 ) // Looking only at leaves { // Computation energy of tree tree_energy = 0; sub_tree_size = n_nodes_per_tree / 2; sub_root = sub_tree_size; // First do things on log-scale tree_energy_leaf_on_gradient = -tree_energies[offset+t+n] + tree_free_energies[t]; tree_energy_leaf_off_gradient = -tree_energies[offset+t+n+1] + tree_free_energies[t]; tree_energy_gradient = logadd(tree_energy_leaf_on_gradient, tree_energy_leaf_off_gradient); for( int d=0; d<tree_depth-1; d++ ) { if( n < sub_root ) { if( unit_activations_neg_gradient_init[offset+sub_root] ) unit_activations_neg_gradient[offset+sub_root] = logadd(tree_energy_gradient, unit_activations_neg_gradient[offset+sub_root]); else { unit_activations_neg_gradient[offset+sub_root] = tree_energy_gradient; unit_activations_neg_gradient_init[offset+sub_root] = true; } sub_tree_size /= 2; sub_root -= sub_tree_size + 1; } else { if( use_signed_samples ) { if( unit_activations_pos_gradient_init[offset+sub_root] ) unit_activations_pos_gradient[offset+sub_root] = logadd(tree_energy_gradient, unit_activations_pos_gradient[offset+sub_root]); else { unit_activations_pos_gradient[offset+sub_root] = tree_energy_gradient; unit_activations_pos_gradient_init[offset+sub_root] = true; } } sub_tree_size /= 2; sub_root += sub_tree_size+1; } } unit_activations_neg_gradient[offset+n] = tree_energy_leaf_on_gradient; unit_activations_neg_gradient_init[offset+n] = true; if( use_signed_samples ) { unit_activations_pos_gradient[offset+n] = tree_energy_leaf_off_gradient; unit_activations_pos_gradient_init[offset+n] = true; } } offset += n_nodes_per_tree; } // Go back to linear-scale for(int i=0; i<size; i++) unit_activations_gradient[i] -= output_gradient * safeexp( unit_activations_neg_gradient[i] ); if( use_signed_samples ) for(int i=0; i<size; i++) unit_activations_gradient[i] += output_gradient * safeexp( unit_activations_pos_gradient[i] ); }
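What the log-domain accumulators above implement, written out as a formula (a reconstruction consistent with the code, not taken from the PLearn docs): differentiating the free energy \(F(a) = -\log \sum_h \exp(h^\top a)\) gives

\[
\frac{\partial F}{\partial a_i}
\;=\; -\sum_{h:\,h_i=1} \frac{\exp(h^\top a)}{\sum_{h'} \exp(h'^\top a)}
\;=\; -\,P(h_i = 1),
\]

minus the marginal probability that unit i is on; unit_activations_neg_gradient accumulates \(\log P(h_i = 1)\) with logadd() before a single safeexp() at the end. With use_signed_samples, unit i can also take the value -1, the gradient becomes \(-(P(h_i = +1) - P(h_i = -1))\), and the positive part is accumulated in unit_activations_pos_gradient.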
void PLearn::RBMWoodsLayer::generateSample() [virtual]
generate a sample, and update the sample field
Implements PLearn::RBMLayer.
Definition at line 64 of file RBMWoodsLayer.cc.
References PLearn::TVec< T >::clear(), PLearn::RBMLayer::expectation_is_up_to_date, local_node_expectation, n_trees, PLASSERT_MSG, PLCHECK_MSG, PLearn::RBMLayer::random_gen, PLearn::RBMLayer::sample, PLearn::RBMLayer::size, tree_depth, and use_signed_samples.
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectation_is_up_to_date, "Expectation should be computed "
                "before calling generateSample()");

    sample.clear();

    int n_nodes_per_tree = size / n_trees;
    int node, depth, node_sample, sub_tree_size;
    int offset = 0;

    for( int t=0; t<n_trees; t++ )
    {
        depth = 0;
        node = n_nodes_per_tree / 2;
        sub_tree_size = node;
        while( depth < tree_depth )
        {
            // HUGO: Note that local_node_expectation is really
            // used as a probability, even for signed samples.
            // Sorry for the misleading choice of variable name...
            node_sample = random_gen->binomial_sample(
                local_node_expectation[ node + offset ] );
            if( use_signed_samples )
                sample[node + offset] = 2*node_sample-1;
            else
                sample[node + offset] = node_sample;

            // Descending in the tree
            sub_tree_size /= 2;
            if ( node_sample > 0.5 )
                node -= sub_tree_size+1;
            else
                node += sub_tree_size+1;
            depth++;
        }
        offset += n_nodes_per_tree;
    }
}
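The traversal above stores each tree in in-order layout: the root sits at within-tree index n_nodes_per_tree/2, an "on" decision moves to the left child, an "off" decision to the right child, and the step size halves at every level. A standalone sketch of just that indexing (a hypothetical demo with hard-coded decisions, not PLearn code):

#include <cstdio>

int main()
{
    int tree_depth = 3;
    int n_nodes_per_tree = (1 << tree_depth) - 1; // 7 nodes, in-order layout
    int node = n_nodes_per_tree / 2;              // root at index 3
    int sub_tree_size = node;
    bool on_decision[] = { true, false, true };   // pretend sampled decisions

    for( int depth = 0; depth < tree_depth; depth++ )
    {
        std::printf("visit node %d\n", node);     // prints 3, then 1, then 2
        sub_tree_size /= 2;
        node += on_decision[depth] ? -(sub_tree_size + 1)   // left child
                                   :  (sub_tree_size + 1);  // right child
    }
    return 0;
}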
void PLearn::RBMWoodsLayer::generateSamples() [virtual]
Inherited.
Implements PLearn::RBMLayer.
Definition at line 110 of file RBMWoodsLayer.cc.
References b, PLearn::RBMLayer::batch_size, PLearn::TMat< T >::clear(), PLearn::RBMLayer::expectations_are_up_to_date, PLearn::TMat< T >::length(), local_node_expectations, n_trees, PLASSERT, PLASSERT_MSG, PLCHECK_MSG, PLearn::RBMLayer::random_gen, PLearn::RBMLayer::samples, PLearn::RBMLayer::size, tree_depth, use_signed_samples, and PLearn::TMat< T >::width().
{ PLASSERT_MSG(random_gen, "random_gen should be initialized before generating samples"); PLCHECK_MSG(expectations_are_up_to_date, "Expectations should be computed " "before calling generateSamples()"); PLASSERT( samples.width() == size && samples.length() == batch_size ); //PLERROR( "RBMWoodsLayer::generateSamples(): not implemented yet" ); samples.clear(); int n_nodes_per_tree = size / n_trees; int node, depth, node_sample, sub_tree_size; int offset = 0; for( int b=0; b<batch_size; b++ ) { offset = 0; for( int t=0; t<n_trees; t++ ) { depth = 0; node = n_nodes_per_tree / 2; sub_tree_size = node; while( depth < tree_depth ) { // HUGO: Note that local_node_expectation is really // used as a probability, even for signed samples. // Sorry for the misleading choice of variable name... node_sample = random_gen->binomial_sample( local_node_expectations(b, node + offset ) ); if( use_signed_samples ) samples(b,node + offset) = 2*node_sample-1; else samples(b,node + offset) = node_sample; // Descending in the tree sub_tree_size /= 2; if ( node_sample > 0.5 ) node -= sub_tree_size+1; else node += sub_tree_size+1; depth++; } offset += n_nodes_per_tree; } } }
void PLearn::RBMWoodsLayer::getConfiguration(int conf_index, Vec & output) [virtual]
Computes the conf_index configuration of the layer.
Reimplemented from PLearn::RBMLayer.
Definition at line 1447 of file RBMWoodsLayer.cc.
References PLearn::TVec< T >::clear(), getConfigurationCount(), i, PLearn::ipow(), j, PLearn::TVec< T >::length(), n_trees, PLASSERT, PLearn::RBMLayer::size, PLearn::TVec< T >::subVec(), tree_depth, and use_signed_samples.
{
    PLASSERT( output.length() == size );
    PLASSERT( conf_index >= 0 && conf_index < getConfigurationCount() );

    int n_conf_per_tree = ipow(2,tree_depth);
    int conf_i = conf_index;
    int begin = 0;
    int current_node, sub_tree_size, tree_conf_i;
    output.clear();
    Vec output_i;
    for ( int i = 0; i < n_trees; ++i )
    {
        output_i = output.subVec( begin, n_conf_per_tree-1 );
        tree_conf_i = conf_i % n_conf_per_tree;

        // Get current tree's configuration
        output_i.clear();
        current_node = (n_conf_per_tree-1)/2;
        sub_tree_size = current_node;
        for( int j=0; j < tree_depth; j++)
        {
            if( tree_conf_i < current_node + 1 )
            {
                output_i[current_node] = 1;
                sub_tree_size /= 2;
                current_node -= sub_tree_size+1;
            }
            else
            {
                if( use_signed_samples )
                    output_i[current_node] = -1;
                sub_tree_size /= 2;
                current_node += sub_tree_size+1;
            }
        }
        conf_i /= n_conf_per_tree;
        begin += n_conf_per_tree-1;
    }
}
int PLearn::RBMWoodsLayer::getConfigurationCount() [virtual]
Returns a number of different configurations the layer can be in.
Reimplemented from PLearn::RBMLayer.
Definition at line 1438 of file RBMWoodsLayer.cc.
References PLearn::RBMLayer::INFINITE_CONFIGURATIONS, PLearn::ipow(), n_trees, and tree_depth.
Referenced by getConfiguration().
{
    real ret = ipow(ipow(2.0,tree_depth),n_trees);

    if( ret > INT_MAX )
        return INFINITE_CONFIGURATIONS;
    else
        return (int) round(ret);
}
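Each tree contributes one binary decision per level of the descent, hence 2^tree_depth configurations per tree and (2^tree_depth)^n_trees for the whole layer; getConfiguration() above decodes conf_index accordingly, as an n_trees-digit number in base 2^tree_depth (the % and / by n_conf_per_tree). For example, with the default options tree_depth = 3 and n_trees = 10, the count is 8^10 = 2^30 = 1073741824, which still fits in an int; one more tree (8^11 = 2^33) would exceed INT_MAX and INFINITE_CONFIGURATIONS would be returned instead.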
OptionList & PLearn::RBMWoodsLayer::getOptionList() const [virtual]
Reimplemented from PLearn::Object.
Definition at line 51 of file RBMWoodsLayer.cc.
OptionMap & PLearn::RBMWoodsLayer::getOptionMap() const [virtual]
Reimplemented from PLearn::Object.
Definition at line 51 of file RBMWoodsLayer.cc.
RemoteMethodMap & PLearn::RBMWoodsLayer::getRemoteMethodMap() const [virtual]
Reimplemented from PLearn::Object.
Definition at line 51 of file RBMWoodsLayer.cc.
void PLearn::RBMWoodsLayer::makeDeepCopyFromShallowCopy(CopiesMap & copies) [virtual]
Transforms a shallow copy into a deep copy.
Reimplemented from PLearn::RBMLayer.
Definition at line 1233 of file RBMWoodsLayer.cc.
References PLearn::deepCopyField(), local_node_expectation, local_node_expectation_gradient, local_node_expectations, PLearn::RBMLayer::makeDeepCopyFromShallowCopy(), off_expectation, off_expectations, off_free_energies, off_free_energy, off_free_energy_gradient, off_tree_gradient, on_free_energies, on_free_energy, on_free_energy_gradient, and on_tree_gradient.
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField( off_expectation, copies );
    deepCopyField( off_expectations, copies );
    deepCopyField( local_node_expectation, copies );
    deepCopyField( local_node_expectations, copies );
    deepCopyField( on_free_energy, copies );
    deepCopyField( on_free_energies, copies );
    deepCopyField( off_free_energy, copies );
    deepCopyField( off_free_energies, copies );
    deepCopyField( local_node_expectation_gradient, copies );
    deepCopyField( on_tree_gradient, copies );
    deepCopyField( off_tree_gradient, copies );
    deepCopyField( on_free_energy_gradient, copies );
    deepCopyField( off_free_energy_gradient, copies );
}
StaticInitializer PLearn::RBMWoodsLayer::_static_initializer_ [static]
Reimplemented from PLearn::RBMLayer.
Definition at line 151 of file RBMWoodsLayer.h.
Vec PLearn::RBMWoodsLayer::local_node_expectation [protected]
Definition at line 166 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), computeExpectation(), fprop(), generateSample(), and makeDeepCopyFromShallowCopy().
Vec PLearn::RBMWoodsLayer::local_node_expectation_gradient [protected]
Definition at line 177 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), and makeDeepCopyFromShallowCopy().
Mat PLearn::RBMWoodsLayer::local_node_expectations [protected]
Definition at line 167 of file RBMWoodsLayer.h.
Referenced by computeExpectations(), generateSamples(), and makeDeepCopyFromShallowCopy().
int PLearn::RBMWoodsLayer::n_trees
Number of trees in the woods.
Definition at line 60 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), computeExpectation(), computeExpectations(), computeProbabilisticClustering(), declareOptions(), fprop(), freeEnergyContribution(), freeEnergyContributionGradient(), generateSample(), generateSamples(), getConfiguration(), and getConfigurationCount().
Vec PLearn::RBMWoodsLayer::off_expectation [protected]
Definition at line 163 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), computeExpectation(), computeProbabilisticClustering(), fprop(), and makeDeepCopyFromShallowCopy().
Mat PLearn::RBMWoodsLayer::off_expectations [protected]
Definition at line 164 of file RBMWoodsLayer.h.
Referenced by computeExpectations(), and makeDeepCopyFromShallowCopy().
Mat PLearn::RBMWoodsLayer::off_free_energies [protected]
Definition at line 174 of file RBMWoodsLayer.h.
Referenced by computeExpectations(), and makeDeepCopyFromShallowCopy().
Vec PLearn::RBMWoodsLayer::off_free_energy [protected]
Definition at line 173 of file RBMWoodsLayer.h.
Referenced by build_(), computeExpectation(), fprop(), and makeDeepCopyFromShallowCopy().
Vec PLearn::RBMWoodsLayer::off_free_energy_gradient [protected]
Definition at line 182 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), and makeDeepCopyFromShallowCopy().
Vec PLearn::RBMWoodsLayer::off_tree_gradient [protected]
Definition at line 180 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), and makeDeepCopyFromShallowCopy().
Mat PLearn::RBMWoodsLayer::on_free_energies [protected]
Definition at line 171 of file RBMWoodsLayer.h.
Referenced by computeExpectations(), and makeDeepCopyFromShallowCopy().
Vec PLearn::RBMWoodsLayer::on_free_energy [protected]
Definition at line 170 of file RBMWoodsLayer.h.
Referenced by build_(), computeExpectation(), fprop(), and makeDeepCopyFromShallowCopy().
Vec PLearn::RBMWoodsLayer::on_free_energy_gradient [protected]
Definition at line 181 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), and makeDeepCopyFromShallowCopy().
Vec PLearn::RBMWoodsLayer::on_tree_gradient [protected]
Definition at line 179 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), and makeDeepCopyFromShallowCopy().
int PLearn::RBMWoodsLayer::tree_depth
Depth of the trees in the woods (1 gives the ordinary RBMBinomialLayer).
Definition at line 63 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), build_(), computeExpectation(), computeExpectations(), declareOptions(), fprop(), freeEnergyContribution(), freeEnergyContributionGradient(), generateSample(), generateSamples(), getConfiguration(), and getConfigurationCount().
Vec PLearn::RBMWoodsLayer::tree_energies [mutable, protected]
Definition at line 186 of file RBMWoodsLayer.h.
Referenced by freeEnergyContribution(), and freeEnergyContributionGradient().
Vec PLearn::RBMWoodsLayer::tree_free_energies [mutable, protected]
Definition at line 185 of file RBMWoodsLayer.h.
Referenced by freeEnergyContribution(), and freeEnergyContributionGradient().
Vec PLearn::RBMWoodsLayer::unit_activations_neg_gradient [mutable, protected]
Definition at line 188 of file RBMWoodsLayer.h.
Referenced by freeEnergyContributionGradient().
TVec<bool> PLearn::RBMWoodsLayer::unit_activations_neg_gradient_init [mutable, protected]
Definition at line 190 of file RBMWoodsLayer.h.
Referenced by freeEnergyContributionGradient().
Vec PLearn::RBMWoodsLayer::unit_activations_pos_gradient [mutable, protected]
Definition at line 187 of file RBMWoodsLayer.h.
Referenced by freeEnergyContributionGradient().
TVec<bool> PLearn::RBMWoodsLayer::unit_activations_pos_gradient_init [mutable, protected]
Definition at line 189 of file RBMWoodsLayer.h.
Referenced by freeEnergyContributionGradient().
bool PLearn::RBMWoodsLayer::use_signed_samples
Indication that samples should be in {-1,1}, not {0,1}, at nodes where a left/right decision is made.
Other nodes are set to 0.
Definition at line 67 of file RBMWoodsLayer.h.
Referenced by bpropUpdate(), computeExpectation(), computeExpectations(), declareOptions(), fprop(), freeEnergyContribution(), freeEnergyContributionGradient(), generateSample(), generateSamples(), and getConfiguration().