RBMQLParameters.cc
// -*- C++ -*-

// RBMQLParameters.cc
//
// Copyright (C) 2006 Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Dan Popovici

#include "RBMQLParameters.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMQLParameters,
    "Stores and learns the parameters of an RBM between one quadratic layer"
    " at the bottom and one linear layer at the top",
    "");

RBMQLParameters::RBMQLParameters( real the_learning_rate ) :
    inherited(the_learning_rate)
{
}

RBMQLParameters::RBMQLParameters( string down_types, string up_types,
                                  real the_learning_rate ) :
    inherited( down_types, up_types, the_learning_rate )
{
    // We're not sure inherited::build() has been called
    build();
}
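
// Illustrative usage sketch (not from the original file): build the
// parameters linking a 4-unit quadratic (Gaussian) layer below to a 3-unit
// linear layer above. The type strings use one character per unit -- 'q' for
// quadratic, 'l' for linear, as enforced in build_() below; the layer sizes
// and the 0.01 learning rate are made-up values.
//
//     RBMQLParameters params( "qqqq", "lll", 0.01 );
//     params.forget();   // randomize weights, reset biases and statistics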

void RBMQLParameters::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    declareOption(ol, "weights", &RBMQLParameters::weights,
                  OptionBase::learntoption,
                  "Matrix containing unit-to-unit weights (output_size ×"
                  " input_size)");

    declareOption(ol, "up_units_bias",
                  &RBMQLParameters::up_units_bias,
                  OptionBase::learntoption,
                  "Element i contains the bias of up unit i");

    declareOption(ol, "down_units_params",
                  &RBMQLParameters::down_units_params,
                  OptionBase::learntoption,
                  "Element 0,i contains the bias of down unit i. Element 1,i"
                  " contains the quadratic term of down unit i.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMQLParameters::build_()
{
    if( up_layer_size == 0 || down_layer_size == 0 )
        return;

    output_size = 0;
    bool needs_forget = false; // do we need to reinitialize the parameters?

    if( weights.length() != up_layer_size ||
        weights.width() != down_layer_size )
    {
        weights.resize( up_layer_size, down_layer_size );
        needs_forget = true;
    }

    weights_pos_stats.resize( up_layer_size, down_layer_size );
    weights_neg_stats.resize( up_layer_size, down_layer_size );

    down_units_params.resize( 2 );
    down_units_params[0].resize( down_layer_size );
    down_units_params[1].resize( down_layer_size );

    down_units_params_pos_stats.resize( 2 );
    down_units_params_pos_stats[0].resize( down_layer_size );
    down_units_params_pos_stats[1].resize( down_layer_size );

    down_units_params_neg_stats.resize( 2 );
    down_units_params_neg_stats[0].resize( down_layer_size );
    down_units_params_neg_stats[1].resize( down_layer_size );

    for( int i=0 ; i<down_layer_size ; i++ )
    {
        char dut_i = down_units_types[i];
        if( dut_i != 'q' ) // not a quadratic activation unit
            PLERROR( "RBMQLParameters::build_() - value '%c' for"
                     " down_units_types[%d]\n"
                     "should be 'q'.\n",
                     dut_i, i );
    }

    up_units_bias.resize( up_layer_size );
    up_units_bias_pos_stats.resize( up_layer_size );
    up_units_bias_neg_stats.resize( up_layer_size );
    for( int i=0 ; i<up_layer_size ; i++ )
    {
        char uut_i = up_units_types[i];
        if( uut_i != 'l' ) // not a linear activation unit
            PLERROR( "RBMQLParameters::build_() - value '%c' for"
                     " up_units_types[%d]\n"
                     "should be 'l'.\n",
                     uut_i, i );
    }

    if( needs_forget )
        forget();

    clearStats();
}

void RBMQLParameters::build()
{
    inherited::build();
    build_();
}


void RBMQLParameters::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(weights, copies);
    deepCopyField(up_units_bias, copies);
    deepCopyField(down_units_params, copies);
    deepCopyField(weights_pos_stats, copies);
    deepCopyField(weights_neg_stats, copies);
    deepCopyField(up_units_bias_pos_stats, copies);
    deepCopyField(up_units_bias_neg_stats, copies);
    deepCopyField(down_units_params_pos_stats, copies);
    deepCopyField(down_units_params_neg_stats, copies);
}

void RBMQLParameters::accumulatePosStats( const Vec& down_values,
                                          const Vec& up_values )
{
    // weights_pos_stats += up_values * down_values'
    externalProductAcc( weights_pos_stats, up_values, down_values );

    down_units_params_pos_stats[0] += down_values;
    up_units_bias_pos_stats += up_values;

    // gradient of the quadratic energy term w.r.t. a_i is 2 a_i v_i^2
    for( int i=0 ; i<down_layer_size ; i++ )
    {
        down_units_params_pos_stats[1][i] += 2 * down_units_params[1][i] *
            down_values[i] * down_values[i];
    }

    pos_count++;
}
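
// How the two accumulators are meant to be fed during one contrastive
// divergence step (my reading of this class, not code from the original
// file): the positive phase sees a training sample and the hidden values it
// drives, the negative phase sees their reconstruction.
//
//     params.accumulatePosStats( v0, h0 );  // v0: data, h0: hidden given v0
//     params.accumulateNegStats( v1, h1 );  // v1, h1: reconstruction pair
//     params.update();                      // apply and clear the stats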

void RBMQLParameters::accumulateNegStats( const Vec& down_values,
                                          const Vec& up_values )
{
    // weights_neg_stats += up_values * down_values'
    externalProductAcc( weights_neg_stats, up_values, down_values );

    down_units_params_neg_stats[0] += down_values;
    up_units_bias_neg_stats += up_values;

    // gradient of the quadratic energy term w.r.t. a_i is 2 a_i v_i^2
    for( int i=0 ; i<down_layer_size ; i++ )
    {
        down_units_params_neg_stats[1][i] += 2 * down_units_params[1][i] *
            down_values[i] * down_values[i];
    }

    neg_count++;
}

void RBMQLParameters::update()
{
    // updates the parameters:
    // weights -= learning_rate * ( weights_pos_stats/pos_count
    //                              - weights_neg_stats/neg_count )
    weights_pos_stats /= pos_count;
    weights_neg_stats /= neg_count;
    weights_pos_stats -= weights_neg_stats;
    weights_pos_stats *= learning_rate;
    weights -= weights_pos_stats;

    for( int i=0 ; i<up_layer_size ; i++ )
    {
        up_units_bias[i] -=
            learning_rate * (up_units_bias_pos_stats[i]/pos_count
                             - up_units_bias_neg_stats[i]/neg_count);
    }

    for( int i=0 ; i<down_layer_size ; i++ )
    {
        // update the bias of the down units
        down_units_params[0][i] -=
            learning_rate * (down_units_params_pos_stats[0][i]/pos_count
                             - down_units_params_neg_stats[0][i]/neg_count);

        // update the quadratic term of the down units
        down_units_params[1][i] -=
            learning_rate * (down_units_params_pos_stats[1][i]/pos_count
                             - down_units_params_neg_stats[1][i]/neg_count);
    }

    clearStats();
}
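
// In equation form (a standard contrastive-divergence step; notation mine,
// not the original author's), with learning rate epsilon and sample counts
// n+ and n-:
//
//     W  <-  W - epsilon * ( S_W+ / n+  -  S_W- / n- )
//     c  <-  c - epsilon * ( S_c+ / n+  -  S_c- / n- )   (up-unit biases)
//
// and likewise for the down-unit biases and quadratic terms, where S+ / S-
// are the sums gathered by accumulatePosStats() / accumulateNegStats().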

void RBMQLParameters::clearStats()
{
    weights_pos_stats.clear();
    weights_neg_stats.clear();

    down_units_params_pos_stats[0].clear();
    down_units_params_pos_stats[1].clear();

    down_units_params_neg_stats[0].clear();
    down_units_params_neg_stats[1].clear();

    up_units_bias_pos_stats.clear();
    up_units_bias_neg_stats.clear();

    pos_count = 0;
    neg_count = 0;
}

void RBMQLParameters::computeUnitActivations
    ( int start, int length, const Vec& activations ) const
{
    // For the down layer:
    //   activations[2*i]   = mu of unit (i - start)
    //   activations[2*i+1] = sigma of unit (i - start)
    if( going_up )
    {
        PLASSERT( activations.length() == length );
        PLASSERT( start+length <= up_layer_size );
        // activations = weights * input_vec + up_units_bias
        product( activations, weights, input_vec );
        activations += up_units_bias;
    }
    else
    {
        // mu_i = -(sum_j weights(j,i) input_vec[j] + b_i) / (2 * a_i^2),
        // with a_i = down_units_params[1][i], b_i = down_units_params[0][i]

        // TODO: change it to work with start and length
        PLASSERT( start+length <= down_layer_size );
        Mat activations_mat = activations.toMat( activations.length()/2, 2 );
        Mat mu = activations_mat.column(0);
        Mat sigma = activations_mat.column(1);

        transposeProduct( mu, weights,
                          input_vec.toMat( input_vec.length(), 1 ) );

        for( int i=0 ; i<length ; i++ )
        {
            real a_i = down_units_params[1][i];
            mu[i][0] = -(mu[i][0] + down_units_params[0][i]) / (2 * a_i * a_i);
            sigma[i][0] = 1 / (2. * a_i * a_i);
        }
    }
}
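
// Why mu and sigma take this form (a sketch of the derivation; the original
// file states no formula): under the energy convention that matches both
// branches above,
//
//     E(v,h) = sum_i ( a_i^2 v_i^2 + b_i v_i ) + c'h + h'Wv ,
//
// p(v_i|h) is proportional to exp( -a_i^2 v_i^2 - (b_i + (W'h)_i) v_i ).
// Completing the square in v_i yields a Gaussian with
//
//     mean      mu_i      = -( (W'h)_i + b_i ) / ( 2 a_i^2 )
//     variance  sigma_i^2 =    1 / ( 2 a_i^2 ) ,
//
// exactly what the loop computes; note the second column therefore stores
// the variance, not the standard deviation.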

void RBMQLParameters::bpropUpdate(const Vec& input, const Vec& output,
                                  Vec& input_gradient,
                                  const Vec& output_gradient)
{
    PLASSERT( input.size() == down_layer_size );
    PLASSERT( output.size() == up_layer_size );
    PLASSERT( output_gradient.size() == up_layer_size );
    input_gradient.resize( down_layer_size );

    // input_gradient = weights' * output_gradient
    transposeProduct( input_gradient, weights, output_gradient );

    // weights -= learning_rate * output_gradient * input'
    externalProductAcc( weights, (-learning_rate)*output_gradient, input );

    // (up) bias -= learning_rate * output_gradient
    multiplyAcc( up_units_bias, output_gradient, -learning_rate );
}
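
// In equation form (plain gradient descent; notation mine): with the layer
// computing o = W v + c and g = dC/do arriving as output_gradient,
//
//     dC/dv = W' g                     (returned as input_gradient)
//     W  <-  W - learning_rate * g v'
//     c  <-  c - learning_rate * g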

void RBMQLParameters::forget()
{
    if( initialization_method == "zero" )
        weights.clear();
    else
    {
        if( !random_gen )
            random_gen = new PRandom();

        real d = 1. / max( down_layer_size, up_layer_size );
        if( initialization_method == "uniform_sqrt" )
            d = sqrt( d );

        random_gen->fill_random_uniform( weights, -d, d );
    }

    down_units_params[0].clear();
    down_units_params[1].fill(1.);

    up_units_bias.clear();

    clearStats();
}


/* THIS METHOD IS OPTIONAL
void RBMQLParameters::finalize()
{
}
*/

int RBMQLParameters::nParameters(bool share_up_params, bool share_down_params) const
{
    int m = weights.size() + (share_up_params ? up_units_bias.size() : 0);
    if (share_down_params)
        for (int i=0; i<down_units_params.length(); i++)
            m += down_units_params[i].size();
    return m;
}

Vec RBMQLParameters::makeParametersPointHere(const Vec& global_parameters,
                                             bool share_up_params,
                                             bool share_down_params)
{
    int n = nParameters(share_up_params, share_down_params);
    int m = global_parameters.size();
    if (m < n)
        PLERROR("RBMQLParameters::makeParametersPointHere: argument has"
                " length %d, should be at least nParameters()=%d", m, n);
    real* p = global_parameters.data();
    weights.makeSharedValue(p, weights.size());
    p += weights.size();
    if (share_up_params)
    {
        up_units_bias.makeSharedValue(p, up_units_bias.size());
        p += up_units_bias.size();
    }
    if (share_down_params)
        for (int i=0; i<down_units_params.length(); i++)
        {
            down_units_params[i].makeSharedValue(p, down_units_params[i].size());
            p += down_units_params[i].size();
        }
    return global_parameters.subVec(n, m-n);
}
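
// Illustrative call sequence (my example, not from the original file):
// flatten everything into one preallocated vector so an outer optimizer can
// treat the RBM parameters as a single flat vector sharing storage.
//
//     Vec flat( params.nParameters( true, true ) );
//     Vec leftover = params.makeParametersPointHere( flat, true, true );
//     // weights, biases and quadratic terms now point inside 'flat';
//     // 'leftover' is the unused tail of 'flat' (empty here).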


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :