PLearn 0.1
// -*- C++ -*-

// RBMTruncExpLayer.cc
//
// Copyright (C) 2006 Pascal Lamblin & Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin & Dan Popovici

#include "RBMTruncExpLayer.h"
#include <plearn/math/TMat_maths.h>
#include "RBMParameters.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMTruncExpLayer,
    "RBM Layer where unit distribution is a truncated exponential in [0,1]",
    "");

RBMTruncExpLayer::RBMTruncExpLayer()
{
}

RBMTruncExpLayer::RBMTruncExpLayer( int the_size )
{
    size = the_size;
    units_types = string( the_size, 'l' );
    activations.resize( the_size );
    sample.resize( the_size );
    expectation.resize( the_size );
    expectation_is_up_to_date = false;
}

//! Computes the activation of unit "i" from the parameters "rbmp"
void RBMTruncExpLayer::getUnitActivations( int i, PP<RBMParameters> rbmp,
                                           int offset )
{
    Vec activation = activations.subVec( i, 1 );
    rbmp->computeUnitActivations( i+offset, 1, activation );
    expectation_is_up_to_date = false;
}

//! Computes the activations of all units from the parameters "rbmp"
void RBMTruncExpLayer::getAllActivations( PP<RBMParameters> rbmp, int offset )
{
    rbmp->computeUnitActivations( offset, size, activations );
    expectation_is_up_to_date = false;
}

void RBMTruncExpLayer::generateSample()
{
    /* The cumulative is:
     * C(U) = P(u<U | x) = (1 - exp(-U a)) / (1 - exp(-a)) if 0 < U < 1,
     *                     0 if U <= 0 and
     *                     1 if 1 <= U
     *
     * And the inverse, if 0 <= s <= 1:
     * C^{-1}(s) = - log( 1 - s*(1 - exp(-a)) ) / a
     */

    for( int i=0 ; i<size ; i++ )
    {
        real s = random_gen->uniform_sample();
        real a_i = activations[i];
        sample[i] = - pl_log( 1. - s*( 1 - exp(-a_i) ) ) / a_i;
    }
}

void RBMTruncExpLayer::computeExpectation()
{
    if( expectation_is_up_to_date )
        return;

    /* Conditional expectation:
     * E[u|x] = 1/(1-exp(a)) + 1/a
     */

    for( int i=0 ; i<size ; i++ )
    {
        real a_i = activations[i];
        expectation[i] = 1/(1-exp(a_i)) + 1/a_i;
    }

    expectation_is_up_to_date = true;
}

void RBMTruncExpLayer::bpropUpdate(const Vec& input, const Vec& output,
                                   Vec& input_gradient,
                                   const Vec& output_gradient)
{
    PLASSERT( input.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );
    input_gradient.resize( size );

    // f(a) = E[u|x] = 1/(1-exp(a)) + 1/a
    // df/da = exp(a)/(1-exp(a))^2 - 1/a^2

    for( int i=0 ; i<size ; i++ )
    {
        real a_i = input[i];
        real ea_i = exp( a_i );
        // Chain rule: back-propagate output_gradient through df/da
        input_gradient[i] = output_gradient[i] *
            ( ea_i/( (1 - ea_i) * (1 - ea_i) ) - 1/(a_i * a_i) );
    }
}


void RBMTruncExpLayer::declareOptions(OptionList& ol)
{
    /*
    declareOption(ol, "size", &RBMTruncExpLayer::size,
                  OptionBase::buildoption,
                  "Number of units.");
    */
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMTruncExpLayer::build_()
{
    if( size < 0 )
        size = int(units_types.size());
    if( size != (int) units_types.size() )
        units_types = string( size, 'l' );

    activations.resize( size );
    sample.resize( size );
    expectation.resize( size );
    expectation_is_up_to_date = false;
}

void RBMTruncExpLayer::build()
{
    inherited::build();
    build_();
}


void RBMTruncExpLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
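
As a sanity check on the sampling formula above, here is a minimal standalone
sketch (illustration only, not part of the PLearn source) of the inverse-CDF
draw performed in generateSample(). It uses std::mt19937 in place of PLearn's
random_gen; the empirical mean of many samples should approach the closed-form
conditional expectation used in computeExpectation().

// Illustration only -- not part of RBMTruncExpLayer.cc.
#include <cmath>
#include <cstdio>
#include <random>

// Draw one sample from the truncated exponential on [0,1] with density
// p(u) proportional to exp(-a*u), by inverting its cumulative C(U).
double sample_trunc_exp(double a, std::mt19937& rng)
{
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    double s = uniform(rng);
    // C^{-1}(s) = -log(1 - s*(1 - exp(-a))) / a
    return -std::log(1.0 - s * (1.0 - std::exp(-a))) / a;
}

int main()
{
    std::mt19937 rng(0);
    double a = 1.5;
    double sum = 0.0;
    int n = 1000000;
    for (int i = 0; i < n; ++i)
        sum += sample_trunc_exp(a, rng);
    // The empirical mean should approach 1/(1 - exp(a)) + 1/a,
    // the expectation used in computeExpectation().
    std::printf("empirical mean: %f\n", sum / n);
    std::printf("closed form:    %f\n", 1.0 / (1.0 - std::exp(a)) + 1.0 / a);
    return 0;
}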
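
The derivative used in bpropUpdate() can be checked the same way: the sketch
below (again an illustration, not part of the source) compares the closed form
df/da = exp(a)/(1-exp(a))^2 - 1/a^2 against a central finite difference of
f(a) = 1/(1-exp(a)) + 1/a.

// Illustration only -- finite-difference check of df/da.
#include <cmath>
#include <cstdio>

// Conditional expectation of the truncated exponential, as a function of a.
static double f(double a) { return 1.0 / (1.0 - std::exp(a)) + 1.0 / a; }

int main()
{
    double a = 0.7;
    double h = 1e-5;
    double numeric  = (f(a + h) - f(a - h)) / (2.0 * h);
    double analytic = std::exp(a) / ((1.0 - std::exp(a)) * (1.0 - std::exp(a)))
                      - 1.0 / (a * a);
    std::printf("finite difference: %.8f\n", numeric);
    std::printf("closed form:       %.8f\n", analytic);
    return 0;
}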