// -*- C++ -*-

// NnlmWordRepresentationLayer.cc
//
// Copyright (C) 2006 Pierre-Antoine Manzagol
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pierre-Antoine Manzagol

#include "NnlmWordRepresentationLayer.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    NnlmWordRepresentationLayer,
    "Implements the word representation layer for the online NNLM.",
    "Maps each of the 'context_size' word indices received as input to its\n"
    "learned real-valued representation (a row of the 'weights' lookup table)\n"
    "and concatenates these representations to form the output.");

NnlmWordRepresentationLayer::NnlmWordRepresentationLayer() :
    OnlineLearningModule(),
    vocabulary_size( -1 ),
    word_representation_size( -1 ),
    context_size( -1 ),
    start_learning_rate( 0.001 ),
    decrease_constant( 0 ),
    step_number( 0 ),
    learning_rate( 0.0 )
{
    // ### You may (or not) want to call build_() to finish building the object
    // ### (doing so assumes the parent classes' build_() have been called too
    // ### in the parent classes' constructors, something that you must ensure)
}

void NnlmWordRepresentationLayer::declareOptions(OptionList& ol)
{
    declareOption(ol, "vocabulary_size",
                  &NnlmWordRepresentationLayer::vocabulary_size,
                  OptionBase::buildoption,
                  "Size of the vocabulary - defines the virtual input size.");

    declareOption(ol, "word_representation_size",
                  &NnlmWordRepresentationLayer::word_representation_size,
                  OptionBase::buildoption,
                  "Size of the real distributed word representation.");

    declareOption(ol, "context_size",
                  &NnlmWordRepresentationLayer::context_size,
                  OptionBase::buildoption,
                  "Size of the word context.");

    declareOption(ol, "start_learning_rate",
                  &NnlmWordRepresentationLayer::start_learning_rate,
                  OptionBase::buildoption,
                  "Learning rate of the stochastic gradient optimization.");

    declareOption(ol, "decrease_constant",
                  &NnlmWordRepresentationLayer::decrease_constant,
                  OptionBase::buildoption,
                  "Decrease constant of the stochastic gradient optimization.");

    // * Learnt options

    declareOption(ol, "step_number", &NnlmWordRepresentationLayer::step_number,
                  OptionBase::learntoption,
                  "The step number, incremented after each update.");

    declareOption(ol, "weights", &NnlmWordRepresentationLayer::weights,
                  OptionBase::learntoption,
                  "Input weights of the neurons (one row per neuron, no bias).");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void NnlmWordRepresentationLayer::build_()
{
    // *** Some variables are connected...
    // For now we overwrite these.
    input_size = context_size;
    output_size = context_size * word_representation_size;

    // *** A few sanity checks
    if( input_size <= 0 )
    {
        PLERROR("NnlmWordRepresentationLayer::build_: 'input_size' <= 0 (%i).\n"
                "You should set it to a positive integer.\n", input_size);
    }
    else if( word_representation_size * context_size != output_size )
    {
        PLERROR("NnlmWordRepresentationLayer::build_: 'output_size' inconsistent with\n"
                " 'word_representation_size * context_size': %i != ( %i * %i )\n",
                output_size, word_representation_size, context_size);
    }
    else if( vocabulary_size <= 0 )
    {
        PLERROR("NnlmWordRepresentationLayer::build_: 'vocabulary_size' <= 0 (%i).\n",
                vocabulary_size);
    }

    // *** Initialize weights if not loaded
    if( weights.size() == 0 )   {
        forget();
    }
}

// ### Nothing to add here, simply calls build_
void NnlmWordRepresentationLayer::build()
{
    inherited::build();
    build_();
}

void NnlmWordRepresentationLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(weights, copies);
}

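//! Given a vector of 'context_size' word indices, looks up each word's
//! real-valued representation (a row of the 'weights' lookup table) and
//! concatenates these representations into the output vector.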
void NnlmWordRepresentationLayer::fprop(const Vec& input, Vec& output) const
{
    // TODO only do these checks in debug
    // *** Sanity checks

    // Check the input holds 'input_size' hot indices
    int in_size = input.size();
    if( in_size != input_size )
    {
        PLERROR("NnlmWordRepresentationLayer::fprop: 'input.size()' should be equal\n"
                " to 'input_size' (%i != %i)\n", in_size, input_size);
    }

    int out_size = output.size();
    if( out_size != output_size )
    {
        PLERROR("NnlmWordRepresentationLayer::fprop: 'output.size()' should be equal\n"
                " to 'output_size' (%i != %i)\n", out_size, output_size);
    }

    // Check the magnitude of each index, then copy the corresponding word
    // representation into the output
    for( int i=0; i<input_size; i++ )  {
        if( input[i] >= vocabulary_size || input[i] < 0 )
        {
            PLERROR("NnlmWordRepresentationLayer::fprop: 'input[%i]' should be smaller\n"
                    " than 'vocabulary_size' (%i !< %i)\n",
                    i, (int) input[i], vocabulary_size);
        }

        output.subVec( i*word_representation_size, word_representation_size ) << weights( (int) input[i] );
    }
}

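//! Updates the word representations online: for each context position i, the
//! row of 'weights' corresponding to word input[i] is moved along the negative
//! of its slice of 'output_gradient', scaled by the current learning rate.
//! No input gradient is propagated (this is the first layer).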
void NnlmWordRepresentationLayer::bpropUpdate(const Vec& input, const Vec& output,
                               const Vec& output_gradient)
{
    int in_size = input.size();
    int out_size = output.size();
    int og_size = output_gradient.size();

    // Size checks
    if( in_size != input_size )
    {
        PLERROR("NnlmWordRepresentationLayer::bpropUpdate: 'input.size()' should be equal\n"
                " to 'input_size' (%i != %i)\n", in_size, input_size);
    }
    if( out_size != output_size )
    {
        PLERROR("NnlmWordRepresentationLayer::bpropUpdate: 'output.size()' should be"
                " equal\n"
                " to 'output_size' (%i != %i)\n", out_size, output_size);
    }
    if( og_size != output_size )
    {
        PLERROR("NnlmWordRepresentationLayer::bpropUpdate: 'output_gradient.size()'"
                " should\n"
                " be equal to 'output_size' (%i != %i)\n",
                og_size, output_size);
    }

    // Standard decreasing learning-rate schedule
    learning_rate = start_learning_rate / ( 1.0 + decrease_constant * step_number );

    // Check the magnitude of each index, then update the corresponding word
    // representation with its slice of the output gradient
    for( int i=0; i<input_size; i++ )  {
        if( input[i] >= vocabulary_size || input[i] < 0 )
        {
            PLERROR("NnlmWordRepresentationLayer::bpropUpdate: 'input[%i]' should be smaller\n"
                    " than 'vocabulary_size' (%i !< %i)\n",
                    i, (int) input[i], vocabulary_size);
        }

        for( int j=0; j < word_representation_size; j++ )  {
            weights( (int) input[i], j ) -= learning_rate * output_gradient[j + i*word_representation_size];
        }
    }

    step_number++;
}


/* THIS METHOD IS OPTIONAL
void NnlmWordRepresentationLayer::bpropUpdate(const Vec& input, const Vec& output,
                               Vec& input_gradient,
                               const Vec& output_gradient)
{
}
*/

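//! Reinitializes the word representations: resizes 'weights' to
//! (vocabulary_size x word_representation_size), fills it uniformly at random
//! in [-1, 1], and resets the step counter.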
void NnlmWordRepresentationLayer::forget()
{
    // *** Weights
    resetWeights();

    // TODO add an option for the seed
    if( !random_gen )   {
        random_gen = new PRandom( 1 );
    }

    //real r = 1.0 / sqrt(input_size);
    //random_gen->fill_random_uniform(weights,-r,r);
    random_gen->fill_random_uniform(weights, -1.0, 1.0);

    // *** Step number
    step_number = 0;
}

/* THIS METHOD IS OPTIONAL
void NnlmWordRepresentationLayer::finalize()
{
}
*/

/* THIS METHOD IS OPTIONAL
bool NnlmWordRepresentationLayer::bpropDoesNothing()
{
}
*/

/* THIS METHOD IS OPTIONAL
void NnlmWordRepresentationLayer::bbpropUpdate(const Vec& input, const Vec& output,
                                const Vec& output_gradient,
                                const Vec& output_diag_hessian)
{
}
*/

/* THIS METHOD IS OPTIONAL
void NnlmWordRepresentationLayer::bbpropUpdate(const Vec& input, const Vec& output,
                                Vec& input_gradient,
                                const Vec& output_gradient,
                                Vec& input_diag_hessian,
                                const Vec& output_diag_hessian)
{
}
*/

void NnlmWordRepresentationLayer::resetWeights()
{
    weights.resize( vocabulary_size, word_representation_size );
    weights.fill( 0 );
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :