// -*- C++ -*-

// NxProfileLearner.cc
//
// Copyright (C) 2007 Pierre-Antoine Manzagol
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pierre-Antoine Manzagol

#include "NxProfileLearner.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    NxProfileLearner,
    "Learns film and user profiles whose dot product predicts ratings.",
    "Each film and each user is mapped to a profile vector of dimension\n"
    "'profile_dim'. Profiles are trained by stochastic gradient descent on\n"
    "the squared prediction error, with optional L1/L2 regularization and\n"
    "optional natural gradient corrections.");

NxProfileLearner::NxProfileLearner() : profile_dim(1),
                                       slr(0.0),
                                       dc(0.0),
                                       L1_penalty_factor(0.0),
                                       L2_penalty_factor(0.0),
                                       n_films(17770),
                                       n_users(480189)
{
    // Random numbers are needed to initialize the profiles in forget().
    if( !random_gen )
        random_gen = new PRandom;
}

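// Model summary: each of the n_films films gets a profile vector f_i and each
// of the n_users users a profile vector u_j, both of dimension 'profile_dim';
// the rating of film i by user j is predicted as dot(f_i, u_j). The default
// n_films/n_users correspond to the Netflix Prize data set.
//
// A minimal, hypothetical PLearn script fragment instantiating this learner
// (option names come from declareOptions() below; the values are purely
// illustrative, and 'nstages' is the usual option inherited from PLearner):
//
//     NxProfileLearner(
//         profile_dim = 10;         # dimension of each film/user profile
//         slr = 0.01;               # starting learning rate
//         dc = 1e-6;                # learning rate decrease constant
//         L2_penalty_factor = 1e-4; # optional weight decay
//         nstages = 20;             # number of training epochs
//     )
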
void NxProfileLearner::declareOptions(OptionList& ol)
{
    declareOption(ol, "profile_dim", &NxProfileLearner::profile_dim,
                  OptionBase::buildoption,
                  "Dimension of the profiles to learn.");
    declareOption(ol, "slr", &NxProfileLearner::slr,
                  OptionBase::buildoption,
                  "Starting learning rate.");
    declareOption(ol, "dc", &NxProfileLearner::dc,
                  OptionBase::buildoption,
                  "Learning rate decrease constant.");

    declareOption(ol, "L1_penalty_factor",
                  &NxProfileLearner::L1_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of L1 regularization term, i.e.\n"
                  "minimize L1_penalty_factor * sum_{ij} |weights(i,j)| during training.\n"
                  "Gets multiplied by the learning rate.");
    declareOption(ol, "L2_penalty_factor",
                  &NxProfileLearner::L2_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of L2 regularization term, i.e.\n"
                  "minimize 0.5 * L2_penalty_factor * sum_{ij} weights(i,j)^2 during training.\n"
                  "Gets multiplied by the learning rate.");

    declareOption(ol, "ngest_films",
                  &NxProfileLearner::ngest_films,
                  OptionBase::buildoption,
                  "Optional NatGradEstimator object for the gradients on the parameters OF ALL FILMS!\n"
                  "NOT A TEMPLATE!");

    declareOption(ol, "ngest_users",
                  &NxProfileLearner::ngest_users,
                  OptionBase::buildoption,
                  "Optional NatGradEstimator object for the gradients on the parameters OF ALL USERS!\n"
                  "NOT A TEMPLATE!");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void NxProfileLearner::build_()
{
    // This method does the real building of the object, according to the set
    // options, in *any* situation: initial building from user-specified
    // options, building of a "reloaded" object from its complete set of
    // serialised options, or re-building after "tuning" options have been
    // modified. The parent class' build_() has already been called.

    if( !train_set )
        return;

    cout << "build()" << endl;

    if( L1_penalty_factor < 0. )
        PLWARNING("NxProfileLearner::build:\n"
                  "L1_penalty_factor is negative!\n");
    if( L2_penalty_factor < 0. )
        PLWARNING("NxProfileLearner::build:\n"
                  "L2_penalty_factor is negative!\n");
    if( (slr*L2_penalty_factor) > 1. )
        PLWARNING("NxProfileLearner::build:\n"
                  "slr = %f is too large for L2_penalty_factor!\n", slr);

    f_profiles.resize(n_films, profile_dim);
    u_profiles.resize(n_users, profile_dim);

    forget();
}

// Nothing to add here, simply calls build_
void NxProfileLearner::build()
{
    inherited::build();
    build_();
}


void NxProfileLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(ngest_films, copies);
    deepCopyField(ngest_users, copies);
    deepCopyField(f_profiles, copies);
    deepCopyField(u_profiles, copies);
}


int NxProfileLearner::outputsize() const
{
    // The output is the single predicted rating.
    return 1;
}

void NxProfileLearner::forget()
{
    inherited::forget();

    cout << "forget" << endl;

    // Initialize the profiles uniformly in [-delta, delta], with delta scaled
    // to the profile dimension.
    real delta = 1.0/sqrt(real(profile_dim));
    random_gen->fill_random_uniform(u_profiles,-delta,delta);
    random_gen->fill_random_uniform(f_profiles,-delta,delta);
    stage = 0;
}

void NxProfileLearner::train()
{
    // The role of the train method is to bring the learner up to
    // stage==nstages, updating train_stats with training costs measured
    // on-line in the process.

    static Vec input;  // static so we don't reallocate memory each time...
    static Vec target; // (but be careful that static means shared!)
    input.resize(inputsize());   // the train_set's inputsize()
    target.resize(targetsize()); // the train_set's targetsize()
    real weight, error, lr;

    static Vec f_grad, f_natgrad;
    static Vec u_grad, u_natgrad;
    f_grad.resize(profile_dim);
    f_natgrad.resize(profile_dim);
    u_grad.resize(profile_dim);
    u_natgrad.resize(profile_dim);

    // Indices for natural gradient estimator calls. Since calls must have
    // contiguous 't's and we exceed the int limit, we need to do some
    // hacking.
    static int ngf_idx=0;
    static int ngu_idx=0;

    // This generic PLearner method does a number of standard things useful
    // for (almost) any learner, and returns 'false' if no training should
    // take place. See PLearner.h for more details.
    if (!initTrain())
        return;

    int nsamples = train_set->length();
    // clear statistics of previous epoch
    train_stats->forget();

    while(stage<nstages)
    {
        PP<ProgressBar> pb;
        if( report_progress )
            pb = new ProgressBar( "Training "+classname(), nsamples);

        // TODO: in case minibatches are used, remember to modify this for
        // natural gradient updates.
        lr = slr/(1.0 + stage*dc);
        real L1_delta = lr * L1_penalty_factor;
        real L2_scaling = 1. - lr * L2_penalty_factor;
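
        // What the loop below implements, per example: for a rating r of
        // film i by user j, the prediction is dot(f_i, u_j), so with
        // error = r - dot(f_i, u_j) the (sign-flipped, factor-2-dropped)
        // squared-error gradients are f_grad = error * u_j and
        // u_grad = error * f_i, giving the stochastic gradient step
        // f_i += lr * f_grad (and symmetrically for u_j). The L1 penalty is
        // applied as a separate soft-thresholding step of width L1_delta,
        // and the L2 penalty as a multiplicative shrink by L2_scaling.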
        for(int i=0; i<nsamples; i++) {
            train_set->getExample(i, input, target, weight);

            PLASSERT( (input[0]>=0) && (input[0]<n_films) &&
                      (input[1]>=0) && (input[1]<n_users) );

            // Save a function call by not using the functions (computeOutput,
            // etc.). Also, we're using the squared error cost, but already
            // dropping the factor of 2 and flipping the sign.
            error = target[0] - dot( f_profiles((int)input[0]), u_profiles((int)input[1]) );

            // WHAT FOLLOWS SHOULD PROBABLY BE MADE MORE EFFICIENT.
            // For example:
            //  - for the normal gradient, multiply lr and error (2 scalars)
            //    before multiplying the vector;
            //  - consider operating on the vector's elements;
            //  - use the updated film parameters in the user's update.

            // the gradients
            f_grad = error * u_profiles((int)input[1]);
            u_grad = error * f_profiles((int)input[0]);

            // Update the parameters: plain stochastic gradient step, or
            // natural gradient step when an estimator was provided.
            if( !ngest_films ) {
                f_profiles((int)input[0]) += lr * f_grad;
            } else {
                (*ngest_films)( ngf_idx, f_grad, f_natgrad );
                // index shenanigans: keep the index within
                // [cov_minibatch_size, 2*cov_minibatch_size) so the
                // estimator always sees contiguous 't' values
                ngf_idx++;
                ngf_idx = ngf_idx%ngest_films->cov_minibatch_size + ngest_films->cov_minibatch_size;
                ngest_films->previous_t = ngf_idx-1;
                // perform parameter update
                f_profiles((int)input[0]) += lr * f_natgrad;
            }

            if( !ngest_users ) {
                u_profiles((int)input[1]) += lr * u_grad;
            } else {
                (*ngest_users)( ngu_idx, u_grad, u_natgrad );
                // index shenanigans (same trick as for the films)
                ngu_idx++;
                ngu_idx = ngu_idx%ngest_users->cov_minibatch_size + ngest_users->cov_minibatch_size;
                ngest_users->previous_t = ngu_idx-1;
                // perform parameter update
                u_profiles((int)input[1]) += lr * u_natgrad;
            }

            // L1 regularization: soft-threshold each profile coefficient
            if( L1_penalty_factor != 0. ) {
                for( int d=0; d<profile_dim; d++ ) {
                    // films
                    if( f_profiles((int)input[0], d) > L1_delta )
                        f_profiles((int)input[0], d) -= L1_delta;
                    else if( f_profiles((int)input[0], d) < -L1_delta )
                        f_profiles((int)input[0], d) += L1_delta;
                    else
                        f_profiles((int)input[0], d) = 0.;
                    // users
                    if( u_profiles((int)input[1], d) > L1_delta )
                        u_profiles((int)input[1], d) -= L1_delta;
                    else if( u_profiles((int)input[1], d) < -L1_delta )
                        u_profiles((int)input[1], d) += L1_delta;
                    else
                        u_profiles((int)input[1], d) = 0.;
                }
            }

            // L2 regularization: shrink the profiles multiplicatively
            if( L2_penalty_factor != 0. ) {
                f_profiles((int)input[0]) *= L2_scaling;
                u_profiles((int)input[1]) *= L2_scaling;
            }

            //train_stats->update(train_costs)
            if( pb )
                pb->update(i);
        }
        ++stage;
        train_stats->finalize(); // finalize statistics for this epoch
    }
}

// Compute the output from the input.
void NxProfileLearner::computeOutput(const Vec& input, Vec& output) const
{
    int nout = outputsize();
    output.resize(nout);

    PLASSERT( (input[0]>=0) && (input[0]<n_films) &&
              (input[1]>=0) && (input[1]<n_users) );

    output[0] = dot( f_profiles((int)input[0]), u_profiles((int)input[1]) );

/*  cout << " f " << filmProfileID << " " << f_profiles(filmProfileID) << endl;
    cout << " u " << userProfileID << " " << u_profiles(userProfileID) << endl;
    cout << "output[0] " << output[0]; */
}
// Compute the costs from *already* computed output.
void NxProfileLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                               const Vec& target, Vec& costs) const
{
    real error = target[0] - output[0];
    // the 16 (= 4^2) puts the squared error back on the 1-5 rating basis
    costs[0] = 16.0 * error * error;
    //cout << " error " << error << " cost[0] " << costs[0] << endl;
}

TVec<string> NxProfileLearner::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by
    // getTrainCostNames).
    return getTrainCostNames();
}

TVec<string> NxProfileLearner::getTrainCostNames() const
{
    // Return the names of the objective costs that the train method computes
    // and for which it updates the VecStatsCollector train_stats
    // (these may or may not be exactly the same as what's returned by
    // getTestCostNames).
    TVec<string> costs;
    costs.resize(1);
    costs[0]="MSE";
    return costs;
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :