// -*- C++ -*-

// RBMLQParameters.cc
//
// Copyright (C) 2006 Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Dan Popovici

#include "RBMLQParameters.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;
PLEARN_IMPLEMENT_OBJECT(
    RBMLQParameters,
    "Stores and learns the parameters of an RBM between one linear layer at"
    " the bottom and one quadratic layer at the top",
    "");
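
// Note added for clarity (not in the original source): in this class the
// down (input) layer is made of linear units with bias vector
// down_units_bias, and the up (hidden) layer is made of quadratic units,
// each with a bias b_i = up_units_params[0][i] and a quadratic coefficient
// a_i = up_units_params[1][i]. Assuming PLearn's usual sign conventions,
// the energy this connection parameterizes is
//
//   E(x, h) = \sum_j c_j x_j + \sum_i ( a_i^2 h_i^2 + b_i h_i )
//             + \sum_{i,j} w_{ij} h_i x_j
//
// with x the down-layer values, h the up-layer values and c the down biases.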

RBMLQParameters::RBMLQParameters( real the_learning_rate ) :
    inherited(the_learning_rate)
{
}

RBMLQParameters::RBMLQParameters( string down_types, string up_types,
                                  real the_learning_rate ) :
    inherited( down_types, up_types, the_learning_rate )
{
    // We're not sure inherited::build() has been called
    build();
}

void RBMLQParameters::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    declareOption(ol, "weights", &RBMLQParameters::weights,
                  OptionBase::learntoption,
                  "Matrix containing unit-to-unit weights (up_layer_size ×"
                  " down_layer_size)");

    declareOption(ol, "down_units_bias",
                  &RBMLQParameters::down_units_bias,
                  OptionBase::learntoption,
                  "Element i contains the bias of down unit i");

    declareOption(ol, "up_units_params",
                  &RBMLQParameters::up_units_params,
                  OptionBase::learntoption,
                  "Element 0,i contains the bias of up unit i. Element 1,i"
                  " contains the quadratic term of up unit i");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMLQParameters::build_()
{
    if( up_layer_size == 0 || down_layer_size == 0 )
        return;

    output_size = 0;
    bool needs_forget = false; // do we need to reinitialize the parameters?

    if( weights.length() != up_layer_size ||
        weights.width() != down_layer_size )
    {
        weights.resize( up_layer_size, down_layer_size );
        needs_forget = true;
    }

    weights_pos_stats.resize( up_layer_size, down_layer_size );
    weights_neg_stats.resize( up_layer_size, down_layer_size );

    down_units_bias.resize( down_layer_size );
    down_units_bias_pos_stats.resize( down_layer_size );
    down_units_bias_neg_stats.resize( down_layer_size );
    for( int i=0 ; i<down_layer_size ; i++ )
    {
        char dut_i = down_units_types[i];
        if( dut_i != 'l' ) // not a linear activation unit
            PLERROR( "RBMLQParameters::build_() - value '%c' for"
                     " down_units_types[%d]\n"
                     "should be 'l'.\n",
                     dut_i, i );
    }

    up_units_params.resize( 2 );
    up_units_params[0].resize( up_layer_size );
    up_units_params[1].resize( up_layer_size );

    up_units_params_pos_stats.resize( 2 );
    up_units_params_pos_stats[0].resize( up_layer_size );
    up_units_params_pos_stats[1].resize( up_layer_size );

    up_units_params_neg_stats.resize( 2 );
    up_units_params_neg_stats[0].resize( up_layer_size );
    up_units_params_neg_stats[1].resize( up_layer_size );

    for( int i=0 ; i<up_layer_size ; i++ )
    {
        char uut_i = up_units_types[i];
        if( uut_i != 'q' ) // not a quadratic activation unit
            PLERROR( "RBMLQParameters::build_() - value '%c' for"
                     " up_units_types[%d]\n"
                     "should be 'q'.\n",
                     uut_i, i );
    }

    if( needs_forget )
        forget();

    clearStats();
}

void RBMLQParameters::build()
{
    inherited::build();
    build_();
}
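
// Illustrative sketch (added; not part of the original file): constructing a
// connection between a 3-unit linear bottom layer and a 2-unit quadratic top
// layer, with one type character per unit. Any character other than 'l' in
// down_types or 'q' in up_types makes build_() abort with a PLERROR; the
// learning rate value below is arbitrary.
//
//     RBMLQParameters params( "lll", "qq", 0.01 );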


void RBMLQParameters::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(weights, copies);
    deepCopyField(down_units_bias, copies);
    deepCopyField(up_units_params, copies);
    deepCopyField(weights_pos_stats, copies);
    deepCopyField(weights_neg_stats, copies);
    deepCopyField(down_units_bias_pos_stats, copies);
    deepCopyField(down_units_bias_neg_stats, copies);
    deepCopyField(up_units_params_pos_stats, copies);
    deepCopyField(up_units_params_neg_stats, copies);
}

void RBMLQParameters::accumulatePosStats( const Vec& down_values,
                                          const Vec& up_values )
{
    // weights_pos_stats += up_values * down_values'
    externalProductAcc( weights_pos_stats, up_values, down_values );

    down_units_bias_pos_stats += down_values;
    up_units_params_pos_stats[0] += up_values;

    for( int i=0 ; i<up_layer_size ; ++i )
    {
        up_units_params_pos_stats[1][i] += 2 * up_units_params[1][i] *
            up_values[i] * up_values[i];
    }

    pos_count++;
}
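
// Math note (added for clarity, assuming the energy written near the top of
// this file): the accumulated statistics are the per-sample gradients of the
// energy with respect to each parameter,
//
//   dE/dw_{ij} = h_i x_j        (the outer product above)
//   dE/dc_j    = x_j            (down bias)
//   dE/db_i    = h_i            (up bias)
//   dE/da_i    = 2 a_i h_i^2    (the loop above)
//
// update() then subtracts learning_rate * (positive mean - negative mean).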

void RBMLQParameters::accumulateNegStats( const Vec& down_values,
                                          const Vec& up_values )
{
    // weights_neg_stats += up_values * down_values'
    externalProductAcc( weights_neg_stats, up_values, down_values );

    down_units_bias_neg_stats += down_values;
    up_units_params_neg_stats[0] += up_values;

    for( int i=0 ; i<up_layer_size ; ++i )
    {
        up_units_params_neg_stats[1][i] += 2 * up_units_params[1][i] *
            up_values[i] * up_values[i];
    }

    neg_count++;
}

void RBMLQParameters::update()
{
    // updates the parameters:
    // weights -= learning_rate * (weights_pos_stats/pos_count
    //                             - weights_neg_stats/neg_count)
    weights_pos_stats /= pos_count;
    weights_neg_stats /= neg_count;
    weights_pos_stats -= weights_neg_stats;
    weights_pos_stats *= learning_rate;
    weights -= weights_pos_stats;

    // down_units_bias has down_layer_size elements
    for( int i=0 ; i<down_layer_size ; i++ )
    {
        down_units_bias[i] -=
            learning_rate * (down_units_bias_pos_stats[i]/pos_count
                             - down_units_bias_neg_stats[i]/neg_count);
    }

    // up_units_params[0] and up_units_params[1] have up_layer_size elements
    for( int i=0 ; i<up_layer_size ; i++ )
    {
        up_units_params[0][i] -=
            learning_rate * (up_units_params_pos_stats[0][i]/pos_count
                             - up_units_params_neg_stats[0][i]/neg_count);

        up_units_params[1][i] -=
            learning_rate * (up_units_params_pos_stats[1][i]/pos_count
                             - up_units_params_neg_stats[1][i]/neg_count);
    }

    clearStats();
}
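
// Illustrative sketch (added; not part of the original file): how one
// contrastive divergence step typically drives this class. The four sample
// vectors are assumed to come from the layers' own sampling code.
static void exampleCDStep( RBMLQParameters& params,
                           const Vec& pos_down, const Vec& pos_up,
                           const Vec& neg_down, const Vec& neg_up )
{
    params.accumulatePosStats( pos_down, pos_up ); // clamped (data) phase
    params.accumulateNegStats( neg_down, neg_up ); // free-running phase
    params.update(); // take the gradient step, then clear the statistics
}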

void RBMLQParameters::clearStats()
{
    weights_pos_stats.clear();
    weights_neg_stats.clear();

    up_units_params_pos_stats[0].clear();
    up_units_params_pos_stats[1].clear();

    up_units_params_neg_stats[0].clear();
    up_units_params_neg_stats[1].clear();

    down_units_bias_pos_stats.clear();
    down_units_bias_neg_stats.clear();

    pos_count = 0;
    neg_count = 0;
}

void RBMLQParameters::computeUnitActivations
    ( int start, int length, const Vec& activations ) const
{
    // activations[2*i]   = mu of up unit (i - start)
    // activations[2*i+1] = sigma of up unit (i - start)
    if( going_up )
    {
        // TODO: change it to work with start and length
        PLASSERT( start+length <= up_layer_size );
        Mat activations_mat = activations.toMat( activations.length()/2, 2 );
        Mat mu = activations_mat.column(0);
        Mat sigma = activations_mat.column(1);

        product( mu, weights, input_vec.toMat(input_vec.length(), 1) );

        for( int i=0 ; i<length ; ++i )
        {
            real a_i = up_units_params[1][i];
            mu[i][0] = - (mu[i][0] + up_units_params[0][i]) / (2 * a_i * a_i);
            sigma[i][0] = 1 / (2. * a_i * a_i);
        }
    }
    else
    {
        PLASSERT( activations.length() == length );
        PLASSERT( start+length <= down_layer_size );
        // activations[i] = sum_j weights(j,i) input_vec[j] + down_units_bias[i]
        transposeProduct( activations, weights, input_vec );
        activations += down_units_bias;
    }
}
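
// Math note (added for clarity, assuming the energy written near the top of
// this file): conditioned on the down-layer values x, a quadratic up unit
// h_i sees the energy term  a_i^2 h_i^2 + h_i ( b_i + \sum_j w_{ij} x_j ),
// so p(h_i | x) \propto exp(-E) is a Gaussian with
//
//   mu_i      = -( \sum_j w_{ij} x_j + b_i ) / (2 a_i^2)
//   sigma_i^2 = 1 / (2 a_i^2)
//
// The going_up branch above stores mu_i and this variance in the
// interleaved (mu, sigma) activation vector.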

inline double cube(double x)
{
    return x*x*x;
}

void RBMLQParameters::bpropUpdate(const Vec& input, const Vec& output,
                                  Vec& input_gradient,
                                  const Vec& output_gradient)
{
    // TODO: clean up the code a bit
    PLASSERT( input.size() == down_layer_size );
    PLASSERT( output.size() == 2 * up_layer_size );
    PLASSERT( output_gradient.size() == 2 * up_layer_size );
    input_gradient.resize( down_layer_size );

    Vec scaled_out_grad( up_layer_size );

    Vec prod_w_input( up_layer_size );
    prod_w_input.clear(); // make sure the accumulator starts at zero

    for( int i=0 ; i<up_layer_size ; ++i )
    {
        real a_i_square = up_units_params[1][i] * up_units_params[1][i];

        // d mu_i / d (w x + b)_i = -1 / (2 a_i^2)
        scaled_out_grad[i] = -0.5 * output_gradient[2 * i] / a_i_square;

        // prod_w_input[i] = sum_j weights(i,j) input[j]
        for( int j=0 ; j<down_layer_size ; ++j )
            prod_w_input[i] += weights[i][j] * input[j];
    }

    // input_gradient = weights' * scaled_out_grad
    transposeProduct( input_gradient, weights, scaled_out_grad );

    // weights -= learning_rate * scaled_out_grad * input'
    externalProductAcc( weights, (-learning_rate)*scaled_out_grad, input );

    // (up) bias -= learning_rate * scaled_out_grad
    multiplyAcc( up_units_params[0], scaled_out_grad, -learning_rate );

    // quadratic term: d mu_i / d a_i = (b_i + (w x)_i) / a_i^3
    for( int i=0 ; i<up_layer_size ; ++i )
    {
        up_units_params[1][i] -= learning_rate * ( up_units_params[0][i] +
                prod_w_input[i] ) / cube(up_units_params[1][i]) *
            output_gradient[2*i];
    }
}
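
// Math note (added for clarity): bpropUpdate above back-propagates only
// through the mu outputs (output_gradient[2i]); the sigma slots do not
// depend on the input, and their gradient entries output_gradient[2i+1]
// are ignored by this implementation. With
// s_i = -0.5 * output_gradient[2i] / a_i^2, the chain rule gives
//
//   d mu_i / d x_j    = -w_{ij} / (2 a_i^2)    ->  input_gradient = W' s
//   d mu_i / d w_{ij} = -x_j / (2 a_i^2)       ->  W   -= lr * s x'
//   d mu_i / d b_i    = -1 / (2 a_i^2)         ->  b   -= lr * s
//   d mu_i / d a_i    = (b_i + (W x)_i)/a_i^3  ->  a_i update in the loop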

void RBMLQParameters::forget()
{
    if( initialization_method == "zero" )
        weights.clear();
    else
    {
        if( !random_gen )
            random_gen = new PRandom();

        real d = 1. / max( down_layer_size, up_layer_size );
        if( initialization_method == "uniform_sqrt" )
            d = sqrt( d );

        random_gen->fill_random_uniform( weights, -d, d );
    }

    up_units_params[0].clear();
    up_units_params[1].fill(1.);

    down_units_bias.clear();

    clearStats();
}


/* THIS METHOD IS OPTIONAL
void RBMLQParameters::finalize()
{
}
*/

int RBMLQParameters::nParameters(bool share_up_params, bool share_down_params) const
{
    int m = weights.size() + (share_down_params ? down_units_bias.size() : 0);
    if (share_up_params)
        for (int i=0; i<up_units_params.length(); i++)
            m += up_units_params[i].size();
    return m;
}
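
// Worked example (added; the sizes are hypothetical): with up_layer_size = 2
// and down_layer_size = 3, sharing everything counts
//   weights 2*3 = 6,  down_units_bias 3,  up_units_params 2*2 = 4,
// so nParameters(true, true) returns 13.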

Vec RBMLQParameters::makeParametersPointHere(const Vec& global_parameters, bool share_up_params, bool share_down_params)
{
    int n = nParameters(share_up_params, share_down_params);
    int m = global_parameters.size();
    if (m < n)
        PLERROR("RBMLQParameters::makeParametersPointHere: argument has length %d,"
                " should be at least nParameters()=%d", m, n);
    real* p = global_parameters.data();
    weights.makeSharedValue(p, weights.size());
    p += weights.size();
    if (share_down_params)
    {
        down_units_bias.makeSharedValue(p, down_units_bias.size());
        p += down_units_bias.size();
    }
    if (share_up_params)
        for (int i=0; i<up_units_params.length(); i++)
        {
            up_units_params[i].makeSharedValue(p, up_units_params[i].size());
            p += up_units_params[i].size();
        }
    return global_parameters.subVec(n, m-n);
}
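
// Illustrative sketch (added; not part of the original file): flattening all
// the parameters of one connection into a single contiguous vector, e.g. so
// a generic optimizer can treat them as one parameter block.
static Vec exampleFlattenParameters( RBMLQParameters& params )
{
    Vec global( params.nParameters( true, true ) );
    // After this call, weights, down_units_bias and up_units_params all
    // alias sections of 'global'; the returned Vec is the unused remainder
    // (empty here, since 'global' has exactly nParameters() elements).
    return params.makeParametersPointHere( global, true, true );
}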


} // end of namespace PLearn

/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :