NnlmOutputLayer.cc
00001 // -*- C++ -*-
00002 
00003 // NnlmOutputLayer.cc
00004 //
00005 // Copyright (C) 2006 Pierre-Antoine Manzagol
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pierre-Antoine Manzagol
00036 
00040 #include "NnlmOutputLayer.h"
00041 
00042 
00043 namespace PLearn {
00044 using namespace std;
00045 
00046 PLEARN_IMPLEMENT_OBJECT(
00047     NnlmOutputLayer,
00048     "Implements the output layer for the Neural Network Language Model.",
00049     "MULTI-LINE \nHELP");
00050 
00051 
00053 // class wordAndProb
00056 class wordAndProb {
00057 public:
00058   wordAndProb(int wt, double p) : wordtag(wt), probability(p){};
00059   int wordtag;
00060   double probability;
00061 };
00062 bool wordAndProbGT(const wordAndProb &a, const wordAndProb &b) 
00063 {
00064     return a.probability > b.probability;
00065 }
00066 
00068 // NnlmOutputLayer
00070 NnlmOutputLayer::NnlmOutputLayer() :
00071     OnlineLearningModule(),
00072     target_cardinality( -1 ),
00073     context_cardinality( -1 ),
00074     sigma2min( 0.001 ), // ### VERY IMPORTANT!!!
00075     dl_start_learning_rate( 0.0 ),
00076     dl_decrease_constant( 0.0 ),
00077     el_start_discount_factor( 0.01 ), // ### VERY IMPORTANT!!!
00078     step_number( 0 ),
00079     umc( 0.999999 ), // ###
00080     learning( LEARNING_DISCRIMINANT ),
00081     cost( COST_DISCR ),
00082     target( -1 ),
00083     the_real_target( -1 ),
00084     context( -1 ),
00085     s( 0.0 ),
00086     g_exponent( 0.0 ),
00087     log_g_det_covariance( -REAL_MAX ),
00088     log_g_normalization( -REAL_MAX ),
00089     log_sum_p_ru( -REAL_MAX ),
00090     is_learning( false )
00091 {
00092     // ### You may (or not) want to call build_() to finish building the object
00093     // ### (doing so assumes the parent classes' build_() have been called too
00094     // ### in the parent classes' constructors, something that you must ensure)
00095 }
00096 
00098 // declareOptions
00100 void NnlmOutputLayer::declareOptions(OptionList& ol)
00101 {
00102     // * Build Options *
00103     // * Build Options *
00104     declareOption(ol, "target_cardinality",
00105                   &NnlmOutputLayer::target_cardinality,
00106                   OptionBase::buildoption,
00107                   "Number of target tags.");
00108 
00109     declareOption(ol, "context_cardinality",
00110                   &NnlmOutputLayer::context_cardinality,
00111                   OptionBase::buildoption,
00112                   "Number of context tags (usually, there will be the additional 'missing' tag).");
00113 
00114     declareOption(ol, "sigma2min",
00115                   &NnlmOutputLayer::sigma2min,
00116                   OptionBase::buildoption,
00117                   "Minimal value for the diagonal covariance matrix.");
00118 
00119     declareOption(ol, "dl_start_learning_rate",
00120                   &NnlmOutputLayer::dl_start_learning_rate,
00121                   OptionBase::buildoption,
00122                   "Discriminant learning start learning rate.");
00123     declareOption(ol, "dl_decrease_constant",
00124                   &NnlmOutputLayer::dl_decrease_constant,
00125                   OptionBase::buildoption,
00126                   "Discriminant learning decrease constant.");
00127 
00128     declareOption(ol, "el_start_discount_factor",
00129                   &NnlmOutputLayer::el_start_discount_factor,
00130                   OptionBase::buildoption,
00131                   "How much weight is given to the first example of a given word relative to the last, e.g. 0.2.");
00132 /*    declareOption(ol, "el_decrease_constant",
00133                   &NnlmOutputLayer::el_decrease_constant,
00134                   OptionBase::buildoption,
00135                   "Empirical learning decrease constant of gaussian parameters discount rate.");
00136 */
00137 
00138     // * Learnt Options *
00139     // * Learnt Options *
00140     declareOption(ol, "step_number", &NnlmOutputLayer::step_number,
00141                   OptionBase::learntoption,
00142                   "The step number, incremented after each update.");
00143 
00144     declareOption(ol, "umc", &NnlmOutputLayer::umc,
00145                   OptionBase::learntoption,
00146                   "The uniform mixture coefficient. p(r|i) = umc p_gauss + (1-umc) p_uniform");
00147 
00148     declareOption(ol, "pi", &NnlmOutputLayer::pi,
00149                   OptionBase::learntoption,
00150                   "pi[t] -> empirical frequency of y==t" );
00151     declareOption(ol, "mu", &NnlmOutputLayer::mu,
00152                   OptionBase::learntoption,
00153                   "mu(t) -> empirical mean of the r's when y==t" );
00154     declareOption(ol, "sigma2", &NnlmOutputLayer::sigma2,
00155                   OptionBase::learntoption,
00156                   "sigma2(t) -> empirical variance of the r's when y==t" );
00157 
00158     declareOption(ol, "sumR", &NnlmOutputLayer::sumR,
00159                   OptionBase::learntoption,
00160                   "sumR(i) -> sum_t r_t 1_{y==i}" );
00161     declareOption(ol, "sumR2", &NnlmOutputLayer::sumR2,
00162                   OptionBase::learntoption,
00163                   "sumR2(i) -> sum_t r_t^2 1_{y==i}" );
00164     declareOption(ol, "sumI", &NnlmOutputLayer::sumI,
00165                   OptionBase::learntoption,
00166                   "sumI(i) -> sum_t 1_{y==i}" );
00167     declareOption(ol, "s_sumI", &NnlmOutputLayer::s_sumI,
00168                   OptionBase::learntoption,
00169                   "sum_t 1" );
00170 
00171     // ### other?
00172 
00173     // Now call the parent class' declareOptions
00174     inherited::declareOptions(ol);
00175 }
00176 
00178 // build
00180 void NnlmOutputLayer::build()
00181 {
00182     inherited::build();
00183     build_();
00184 }
00185 
00187 //build_
00189 void NnlmOutputLayer::build_()
00190 {
00191 
00192     // *** Sanity checks ***
00193     if( input_size <= 0 )  {
00194         PLERROR("NnlmOutputLayer::build_: 'input_size' <= 0 (%i).\n"
00195                 "You should set it to a positive integer.\n", input_size);
00196     }  else if( output_size != 1 )  {
00197         PLERROR("NnlmOutputLayer::build_: 'output_size'(=%i) != 1\n"
00198                   , output_size);
00199     }
00200 
00201     // *** Parameters not initialized ***
00202     if( mu.size() == 0 )   {
00203         resetParameters();
00204     }
00205 
00206 }
00207 
00209 // makeDeepCopyFromShallowCopy
00211 void NnlmOutputLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00212 {
00213     inherited::makeDeepCopyFromShallowCopy(copies);
00214 
00215     deepCopyField(pi, copies);
00216     deepCopyField(mu, copies);
00217     deepCopyField(sigma2, copies);
00218 
00219     deepCopyField(sumI, copies);
00220     deepCopyField(sumR, copies);
00221     deepCopyField(sumR2, copies);
00222 
00223     deepCopyField(el_start_learning_rate, copies);
00224     deepCopyField(el_decrease_constant, copies);
00225     deepCopyField(el_last_update, copies);
00226 
00227     deepCopyField(vec_log_p_rg_t, copies);
00228     deepCopyField(vec_log_p_r_t, copies);
00229     deepCopyField(vec_log_p_rt, copies);
00230 
00231     deepCopyField(beta, copies);
00232 
00233     deepCopyField(nd_gradient, copies);
00234     deepCopyField(ad_gradient, copies);
00235     deepCopyField(fd_gradient, copies);
00236 
00237     deepCopyField(gradient_log_tmp, copies);
00238     deepCopyField(gradient_log_tmp_pos, copies);
00239     deepCopyField(gradient_log_tmp_neg, copies);
00240 }
00241 
00242 
00243 
00245 // resetParameters
00247 // NOTE doesn't reset the class counts
00248 void NnlmOutputLayer::resetParameters()
00249 {
00250 
00251     cout << "NnlmOutputLayer::resetParameters()" << endl;
00252 
00253     step_number = 0;
00254     umc = 0.999999; // ###
00255 
00256     pi.resize( target_cardinality );
00257     pi.fill( 0.0 );
00258     mu.resize( target_cardinality, input_size);
00259     mu.fill( 0.0 );
00260     sigma2.resize( target_cardinality, input_size);
00261     sigma2.fill( 0.0 );
00262 
00263     // ### for a global_sigma2
00264     global_mu.resize(input_size);
00265     global_mu.fill( 0.0 );
00266     global_sigma2.resize(input_size);
00267     global_sigma2.fill( 0.0 );
00268     // ### for a global_sigma2
00269 
00270     resetAllClassVars();
00271 
00272     vec_log_p_rg_t.resize( target_cardinality );
00273     vec_log_p_r_t.resize( target_cardinality );
00274     vec_log_p_rt.resize( target_cardinality );
00275     beta.resize( target_cardinality, input_size );
00276 
00277     nd_gradient.resize( input_size );
00278     nd_gradient.fill( 0.0 );
00279     ad_gradient.resize( input_size );
00280     ad_gradient.fill( 0.0 );
00281     fd_gradient.resize( input_size );
00282     fd_gradient.fill( 0.0 );
00283 
00284     bill.resize( input_size );
00285     bill.fill( 0.0 );
00286     bob.resize( input_size );
00287     bob.fill( 0.0 );
00288 
00289     gradient_log_tmp.resize( input_size );
00290     gradient_log_tmp.fill( 0.0 );
00291     gradient_log_tmp_pos.resize( input_size );
00292     gradient_log_tmp_pos.fill( 0.0 );
00293     gradient_log_tmp_neg.resize( input_size );
00294     gradient_log_tmp_neg.fill( 0.0 );
00295 
00296     //log_p_g_r = safelog( 0.9 );
00297     //sum_log_p_g_r = -REAL_MAX;
00298 
00299 }
00300 
00301 
00303 // resetAllClassVars
00305 // There could be a problem if, for example, OOV is never seen in the train set -> division by zero.
00306 void NnlmOutputLayer::resetAllClassVars() {
00307 
00308     cout << "NnlmOutputLayer::resetAllClassVars()" << endl;
00309 
00310     s_sumI = 0;
00311     sumI.resize( target_cardinality );
00312     sumI.fill( 0 );
00313     sumR.resize( target_cardinality, input_size);
00314     sumR.fill( 0.0 );
00315     sumR2.resize( target_cardinality, input_size);
00316     sumR2.fill( 0.0 );
00317 
00318     // ### for a global_sigma2
00319     global_sumR.resize(input_size);
00320     global_sumR.fill( 0.0 );
00321     global_sumR2.resize(input_size);
00322     global_sumR2.fill( 0.0 );
00323     // ### for a global_sigma2
00324 }
00325 
00327 // updateClassVars
00329 // Updates the count variables for the target, given the input
00330 void NnlmOutputLayer::updateClassVars(const int the_target, const Vec& the_input)
00331 {
00332     #ifdef BOUNDCHECK
00333     if( the_target >= target_cardinality )  {
00334         PLERROR("NnlmOutputLayer::updateClassVars:'the_target'(=%i) >= 'target_cardinality'(=%i)\n",
00335                    the_target, target_cardinality);
00336     }
00337     #endif
00338 
00339     s_sumI++;
00340     sumI[the_target]++;
00341     for(int i=0; i<input_size; i++) {
00342       sumR( the_target, i ) += the_input[i];
00343       sumR2( the_target, i ) += the_input[i]*the_input[i];
00344 
00345       // ### for a global_sigma2
00346       global_sumR[i] += the_input[i];
00347       global_sumR2[i] += the_input[i]*the_input[i];
00348       // ### for a global_sigma2
00349     }
00350 
00351 }
00352 
00354 // applyAllClassVars
00356 void NnlmOutputLayer::applyAllClassVars()
00357 {
00358 
00359 
00360 
00361     // ### global values
00362     for(int i=0; i<input_size; i++) {
00363         global_mu[i] = global_sumR[i] / (real) s_sumI;
00364 
00365         // Divide by (n-1) instead of n
00366         global_sigma2[i] = ( (real) s_sumI * global_mu[i] * global_mu[i] + 
00367                   global_sumR2[i] - 2.0 * global_mu[i] * global_sumR[i]  ) / (s_sumI - 1);
00368 
00369         if(global_sigma2[i]<sigma2min) {
00370             cout << "NnlmOutputLayer::applyAllClassVars() -> global_sigma2[i]<sigma2min" << endl;
00371             global_sigma2[i] = sigma2min;
00372         }
00373 
00374     } // for input_size
00375     // ### global values
00376 
00377 
00378 
00379     for( int t=0; t<target_cardinality; t++ ) {
00380 
00381         #ifdef BOUNDCHECK
00382         if( sumI[ t ] <= 1 )  {
00383             PLERROR("NnlmOutputLayer::applyAllClassVars - sumI[ %i ] <= 1\n", t);
00384         }
00385         #endif
00386 
00387         for(int i=0; i<input_size; i++) {
00388             pi[t] = (real) sumI[ t ] / s_sumI;
00389             mu( t, i ) = sumR( t, i ) / (real) sumI[ t ];
00390 
00391     // ### global values
00392 /*
00393             // Divide by (n-1) instead of n
00394             sigma2( t, i ) = ( sumI[ t ] * mu(t, i) * mu(t, i) + 
00395                      sumR2(t, i) - 2.0 * mu(t, i) * sumR(t, i)  ) / (sumI[ t ] - 1);
00396 
00397             if(sigma2( t, i )<sigma2min) {
00398                 //cout << "***" << t << "***" << sumI[ t ] << " sur " << s_sumI << endl;
00399                 //cout << "NnlmOutputLayer::applyAllClassVars() -> sigma2(" << t << "," << i <<") "
00400                 //    << sigma2(t, i) <<" < sigma2min(" << sigma2min <<")! Setting to sigma2min." <<endl;
00401 
00402                 sigma2( t, i ) = sigma2min;
00403             }
00404 */
00405             sigma2( t, i ) = global_sigma2[i];
00406 
00407 
00408     // ### global values
00409 
00410         } // for input_size
00411 
00412 /*        cout << "***" << t << "***" << sumI[ t ] << " sur " << s_sumI << endl;
00413         cout << mu( t ) << endl;
00414         cout << sigma2( t ) << endl;*/
00415 
00416     } // for target_cardinality
00417 
00418 
00419 
00420 }
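// A short editorial note on the variance expression above: writing n for the count
// (s_sumI globally, sumI[t] per class) and using sum_k r_k = n * mu, we have
//     sum_k (r_k - mu)^2 = sum_k r_k^2 - 2*mu*sum_k r_k + n*mu^2,
// so ( n*mu^2 + sumR2 - 2*mu*sumR ) / (n - 1) is the unbiased sample variance of
// each input component, which is what the loops above compute.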
00421 
00423 // computeEmpiricalLearningRateParameters
00425 // MUST be called after sumI[] is initialized with proper counts.
00426 // In the case of empirical learning of mu and sigma, we use a learning rate for mu: mu' = (1-lr) mu + lr r.
00427 // This learning rate should depend on the word's frequency (to compensate for the rate of evolution of 'r'
00428 // with respect to the frequency of occurrence of the word).
00429 // Here, we compute the slr so that the first example of the train set has a weight in the sum
00430 // that is el_start_discount_factor times the weight of the last example (in one pass over the data).
00431 void NnlmOutputLayer::computeEmpiricalLearningRateParameters()
00432 {
00433     // *** Start learning rate *** 
00434     // (1-slr)^n = el_start_discount_factor -> slr = 1 - (el_start_discount_factor)^{1/n}
00435     el_start_learning_rate.resize(target_cardinality);
00436     el_start_learning_rate.fill(1.0);
00437     for(int i=0; i<target_cardinality; i++) {
00438         el_start_learning_rate[i] -= pow( el_start_discount_factor, 1.0/sumI[i] );
00439     }
00440 
00441     // *** Decrease constant *** 
00442     el_decrease_constant.resize(target_cardinality);
00443     el_decrease_constant.fill(0.0);
00444 
00445     // *** To memorize the step of the last update to the word ***
00446     el_last_update.resize(target_cardinality);
00447     el_last_update.fill(s_sumI);
00448 
00449 }
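// A worked example of the formula above (editorial sketch): with
// el_start_discount_factor = 0.01 and a word seen n = sumI[i] = 100 times,
//     slr = 1 - 0.01^(1/100) ~= 0.045,
// so after one pass the first example's weight (1-slr)^100 ~= 0.01 of the last one's.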
00450 
00452 // setTarget
00454 void NnlmOutputLayer::setTarget(int the_target) const
00455 {
00456 #ifdef BOUNDCHECK
00457     if( the_target >= target_cardinality )  {
00458         PLERROR("NnlmOutputLayer::setTarget:'the_target'(=%i) >= 'target_cardinality'(=%i)\n",
00459                    the_target, target_cardinality);
00460     }
00461 #endif
00462 
00463     target = the_target;
00464 }
00465 
00467 // setContext
00469 void NnlmOutputLayer::setContext(int the_context) const
00470 {
00471 #ifdef BOUNDCHECK
00472     if( the_context >= context_cardinality )  {
00473         PLERROR("NnlmOutputLayer::setContext:'the_context'(=%i) >= 'context_cardinality'(=%i)\n"
00474                   , the_context, context_cardinality);
00475     }
00476 #endif
00477 
00478     context = the_context;
00479 }
00480 
00482 // setCost
00484 // Sets the cost computed in the fprop
00485 void NnlmOutputLayer::setCost(int the_cost)
00486 {
00487 #ifdef BOUNDCHECK
00488     if( the_cost > 2 || the_cost < 0 )  {
00489         PLERROR("NnlmOutputLayer::setCost:'the_cost'(=%i) > '2' or < '0'\n"
00490                   , the_cost);
00491     }
00492 #endif
00493 
00494     cost = the_cost;
00495 }
00496 
00498 // setLearning 
00500 // 
00501 void NnlmOutputLayer::setLearning(int the_learning)
00502 {
00503 #ifdef BOUNDCHECK
00504     if( the_learning > 1 || the_learning < 0 )  {
00505         PLERROR("NnlmOutputLayer::setLearning:'the_learning'(=%i) > '1' or < '0'\n"
00506                   , the_learning);
00507     }
00508 #endif
00509 
00510     learning = the_learning;
00511 }
00512 
00513 
00515 // fprop
00524 void NnlmOutputLayer::fprop(const Vec& input, Vec& output) const
00525 {
00526 
00527     the_real_target = target;
00528 
00529 
00530     // *** In the case of empirical (max likelihood) learning of mu and sigma ***
00531     // we can update mu and sigma before computing the cost and backpropagating.
00532     if( (learning==LEARNING_EMPIRICAL) && is_learning )  {
00533         applyMuAndSigmaEmpiricalUpdate(input);
00534     }
00535 
00536     // *** Non-discriminant cost: -log( p(r,t) ) ***
00537     if( cost == COST_NON_DISCR ) {
00538         compute_nl_p_rt( input, output );
00539     }
00540     // *** Approx-discriminant cost ***
00541     else if( cost == COST_APPROX_DISCR )  {
00542         compute_approx_nl_p_t_r( input, output );
00543     }
00544     // *** Discriminant cost: -log( p(t|r) ) ***
00545     else if( cost == COST_DISCR )  {
00546         compute_nl_p_t_r( input, output );
00547     }
00548     else  {
00549         PLERROR("NnlmOutputLayer::fprop - invalid cost\n");
00550     }
00551 
00552 }
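// Editorial summary of the three costs dispatched above:
//   COST_NON_DISCR    : -log p(r,t)
//   COST_DISCR        : -log p(t|r) = -log p(r,t) + log sum_u p(r,u), u over the whole vocabulary
//   COST_APPROX_DISCR : same form, but the normalizing sum only runs over the target,
//                       the shared candidates and the candidates of the current context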
00553 
00555 // compute_nl_p_rt
00558 void NnlmOutputLayer::compute_nl_p_rt(const Vec& input, Vec& output) const
00559 {
00560 
00561     // *** Sanity check ***
00562     int in_size = input.size();
00563     if( in_size != input_size ) {
00564         PLERROR("NnlmOutputLayer::compute_nl_p_rt: 'input.size()' should be equal\n"
00565                 " to 'input_size' (%i != %i)\n", in_size, input_size);
00566     }
00567 
00568     // *** Compute the gaussian's exponent - 'g' means gaussian ***
00569     // NOTE \Sigma is a diagonal matrix, i.e. its det() is the product of the diagonal terms and its inverse is 1/sigma2 element-wise
00570 
00571     g_exponent = 0.0;
00572     log_g_det_covariance = 0.0;
00573 
00574     //cout << "**** s ";
00575 
00576     for(int i=0; i<input_size; i++) {
00577       //cout << "g_exponent " << g_exponent << endl;
00578       // s = r[i] - mu_t[i]
00579       s = input[i] - mu(target, i);
00580 
00581       //cout << s ;
00582 
00583       // memorize this calculation for gradients computation
00584       beta(target, i) = s / sigma2(target, i);
00585 
00586       g_exponent += s * beta(target, i);
00587 
00588       // determinant of covariance matrix
00589       log_g_det_covariance += safelog( sigma2(target, i) );
00590     }
00591     //cout << endl;
00592 
00593     g_exponent *= -0.5;
00594 
00595     // ### Should we use logs here?
00596     //cout << "g_exponent " << g_exponent << " log_g_det_covariance " << log_g_det_covariance << endl;
00597 
00598 #ifdef BOUNDCHECK
00599     if( isnan(g_exponent) || isnan(log_g_det_covariance) ) {
00600       PLERROR( "NnlmOutputLayer::compute_nl_p_rt - NAN present.\n" );
00601     }
00602 #endif
00603 
00604     // * Compute normalizing factor
00605     log_g_normalization = - 0.5 * ( (input_size) * safelog(2.0 * Pi) + log_g_det_covariance );
00606 
00607     //cout << "log_g_normalization " << log_g_normalization << endl;
00608 
00609     // * Compute log p(r,g|t) = log( p(r|t,g) p(g) ) = log( umc p_gaussian(r|t) )
00610     vec_log_p_rg_t[target] = safelog(umc) + g_exponent + log_g_normalization;
00611 
00612     //cout << "p(r,g|t) " << safeexp( vec_log_p_rg_t[target] ) << endl;
00613 
00614     // * Compute log p(r|t) = log( umc p_g(r|t) + (1-umc) p_u(r|t) )
00615     vec_log_p_r_t[target] = logadd( vec_log_p_rg_t[target] , safelog(1.0-umc) - (input_size) * safelog(2.0));
00616 
00617     //cout << "p_u " << safeexp( safelog(1.0-umc) - (input_size) * safelog(2.0) ) << endl;
00618 
00619     // * Compute log p(r,t)
00620     vec_log_p_rt[target] = safelog(pi[target]) + vec_log_p_r_t[target];
00621 
00622     // * Compute output
00623     output[0] = - vec_log_p_rt[target];
00624 
00625     //cout << "safeexp( vec_log_p_rt[target] ) " << safeexp( vec_log_p_rt[target] ) << endl;
00626 
00627 #ifdef BOUNDCHECK
00628     if( isnan(vec_log_p_rt[target]) ) {
00629       PLERROR( "NnlmOutputLayer::compute_nl_p_rt - NAN present.\n" );
00630     }
00631 #endif
00632 
00633     // * Compute posterior for coeff_class_conditional_uniform_mixture evaluation in the bpropUpdate
00634     // p(generated by gaussian| r) = a p_g(r|i) / p(r|i)
00635     //log_p_g_r = safelog(umc) + g_exponent + log_g_normalization - log_p_r_i;
00636 
00637 }
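// A minimal illustrative sketch of what compute_nl_p_rt evaluates, written as a free
// function with the same helpers used in this file (safelog, logadd, Pi). It assumes a
// diagonal covariance and a uniform component of density 2^-d; names are illustrative.
static real nl_log_joint_sketch(const Vec& r, const Vec& mu_t, const Vec& sigma2_t,
                                real pi_t, real umc)
{
    int d = r.size();
    real exponent = 0.0;
    real log_det = 0.0;
    for (int i = 0; i < d; i++) {
        real s = r[i] - mu_t[i];
        exponent += s * s / sigma2_t[i];   // Mahalanobis term for a diagonal covariance
        log_det  += safelog(sigma2_t[i]);  // log determinant = sum of log variances
    }
    real log_gauss = -0.5 * ( exponent + d * safelog(2.0 * Pi) + log_det );
    // p(r|t) = umc * N(r; mu_t, diag(sigma2_t)) + (1-umc) * 2^-d, combined in the log domain
    real log_p_r_t = logadd( safelog(umc) + log_gauss,
                             safelog(1.0 - umc) - d * safelog(2.0) );
    return -( safelog(pi_t) + log_p_r_t );   // -log p(r,t)
}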
00638 
00640 // compute_nl_p_t_r
00643 void NnlmOutputLayer::compute_nl_p_t_r(const Vec& input, Vec& output) const
00644 {
00645     Vec nl_p_rt;
00646     Vec nl_p_ru;
00647 
00648     nl_p_rt.resize( 1 );
00649     nl_p_ru.resize( 1 );
00650 
00651 
00652     // * Compute numerator
00653     compute_nl_p_rt( input, nl_p_rt );
00654 
00655     // * Compute denominator
00656     // Normalize over whole vocabulary
00657 
00658     log_sum_p_ru = -REAL_MAX;
00659 
00660     for(int u=0; u<target_cardinality; u++)  {
00661         setTarget( u );
00662         compute_nl_p_rt( input, nl_p_ru );
00663         log_sum_p_ru = logadd(log_sum_p_ru, -nl_p_ru[0]);
00664     }
00665 
00666     //cout << "log_p_rt[0] " << -nl_p_rt[0] << " log_sum_p_ru " << log_sum_p_ru << endl;
00667 
00668     output[0] = nl_p_rt[0] + log_sum_p_ru;
00669 
00670     //cout << "p_t_r " << safeexp( - output[0] ) << endl;
00671 
00672 #ifdef BOUNDCHECK
00673     if( isnan(output[0]) ) {
00674       PLERROR( "NnlmOutputLayer::compute_nl_p_t_r - NAN present.\n" );
00675     }
00676 #endif
00677 
00678 }
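// A small illustrative helper (sketch, using this file's logadd and REAL_MAX): the
// denominator above, log sum_u p(r,u), is a numerically stable log-sum-exp accumulated
// over the per-word log joints; given a vector of such log joints it could be written as:
static real log_sum_exp_sketch(const Vec& log_joints)
{
    real acc = -REAL_MAX;                      // log(0)
    for (int u = 0; u < log_joints.size(); u++)
        acc = logadd( acc, log_joints[u] );    // stable accumulation in the log domain
    return acc;
}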
00679 
00682 void NnlmOutputLayer::getBestCandidates(const Vec& input, Vec& candidate_tags, Vec& probabilities) const
00683 {
00684     candidate_tags.resize(10);
00685     probabilities.resize(10);
00686 
00687     std::vector< wordAndProb > tmp;
00688     Vec nl_p_ru(1);
00689 
00690     for(int u=0; u<target_cardinality; u++)  {  
00691         setTarget( u );
00692         compute_nl_p_rt( input, nl_p_ru );
00693 
00694         tmp.push_back( wordAndProb( u, safeexp( - (nl_p_ru[0] + log_sum_p_ru) ) ) );
00695     }
00696 
00697     std::sort(tmp.begin(), tmp.end(), wordAndProbGT);
00698 
00699     // HACK: we don't check whether the iterator has reached the end... it is unlikely that target_cardinality is smaller than 10
00700     std::vector< wordAndProb >::iterator itr_vec;
00701     itr_vec=tmp.begin();
00702     for(int i=0; i<10; i++) {
00703         candidate_tags[i] = itr_vec->wordtag;
00704         probabilities[i] = itr_vec->probability;
00705         itr_vec++;
00706     }
00707 
00708     tmp.clear();
00709 }
00710 
00711 
00713 // compute_approx_nl_p_t_r
00716 void NnlmOutputLayer::compute_approx_nl_p_t_r(const Vec& input, Vec& output) const
00717 {
00718     // *** Compute for the target ***
00719     Vec vec_nd_cost(1);
00720     compute_nl_p_rt(input, vec_nd_cost);
00721 
00722 //nd_cost = -log_p_rt;
00723 
00724     // *** Compute for the normalization candidates ***
00725     Vec nl_p_ru;
00726     nl_p_ru.resize( 1 );
00727     log_sum_p_ru = vec_log_p_rt[the_real_target];
00728     int c;
00729 
00730     // shared candidates
00731     for( int i=0; i< shared_candidates.length(); i++ )
00732     {
00733         c = shared_candidates[i];
00734         if( c!=the_real_target )  {
00735             setTarget( c );
00736             compute_nl_p_rt( input, nl_p_ru );
00737             log_sum_p_ru = logadd(log_sum_p_ru, -nl_p_ru[0]);
00738         }
00739     }
00740 
00741     // context candidates 
00742     for( int i=0; i< candidates[ context ].length(); i++ )
00743     {
00744         c = candidates[ context ][i];
00745         if( c!=the_real_target )  {
00746             setTarget( c );
00747             compute_nl_p_rt( input, nl_p_ru );
00748             log_sum_p_ru = logadd(log_sum_p_ru, -nl_p_ru[0]);
00749         }
00750     }
00751 
00752     // *** The approximate discriminant cost ***
00753     output[0] = vec_nd_cost[0] + log_sum_p_ru;
00754 
00755 #ifdef BOUNDCHECK
00756     if( isnan(output[0]) ) {
00757       PLERROR( "NnlmOutputLayer::compute_approx_nl_p_t_r - NAN present.\n" );
00758     }
00759 #endif
00760 
00761 }
00762 
00763 //--------------------------------------------------------------------------------------------------------------------------------
00764 
00766 // computeNonDiscriminantGradient
00769 void NnlmOutputLayer::computeNonDiscriminantGradient() const
00770 {
00771     //cout << "vec_log_p_rg_t[the_real_target] " << vec_log_p_rg_t[the_real_target] << " vec_log_p_r_t[the_real_target] " << vec_log_p_r_t[the_real_target] << endl;
00772 
00773     real tmp = safeexp( vec_log_p_rg_t[the_real_target] - vec_log_p_r_t[the_real_target] );
00774 
00775     for(int i=0; i<input_size; i++) {
00776         nd_gradient[i] = beta( the_real_target, i) * tmp;
00777     }
00778 
00779 }
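// Editorial derivation sketch: with p(r|t) = umc * N(r; mu_t, sigma2_t) + (1-umc) * p_uniform,
// the gradient of the non-discriminant cost -log p(r,t) with respect to the input is
//     d/dr_i [ -log p(r,t) ] = ( umc * N / p(r|t) ) * (r_i - mu(t,i)) / sigma2(t,i)
//                            = exp( vec_log_p_rg_t[t] - vec_log_p_r_t[t] ) * beta(t,i),
// which is exactly the product computed in the loop above.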
00780 
00781 
00783 // computeApproxDiscriminantGradient
00786 void NnlmOutputLayer::computeApproxDiscriminantGradient() const
00787 {
00788     gradient_log_tmp.fill(-REAL_MAX);
00789     gradient_log_tmp_pos.fill(-REAL_MAX);
00790     gradient_log_tmp_neg.fill(-REAL_MAX);
00791 
00792     // * Compute nd gradient
00793     computeNonDiscriminantGradient();
00794 
00795     // * Compute ad specific term
00796     int c;
00797 
00798     // target
00799     addCandidateContribution( the_real_target );
00800 
00801     // shared candidates
00802     for( int i=0; i< shared_candidates.length(); i++ )
00803     {
00804         c = shared_candidates[i];
00805         if( c != the_real_target )
00806             addCandidateContribution( c );
00807     }
00808 
00809     // context candidates 
00810     for( int i=0; i< candidates[ context ].length(); i++ )
00811     {
00812         c = candidates[ context ][i];
00813         if( c != the_real_target )
00814             addCandidateContribution( c );
00815     }
00816 
00817 
00818     // *** The corresponding approx gradient ***
00819     for(int j=0; j<input_size; j++) {
00820         if( gradient_log_tmp_pos[j] > gradient_log_tmp_neg[j] ) {
00821             gradient_log_tmp[j] = logsub( gradient_log_tmp_pos[j], gradient_log_tmp_neg[j] );
00822             ad_gradient[j] = nd_gradient[j] - safeexp( gradient_log_tmp[j] - log_sum_p_ru);
00823         } else  {
00824             gradient_log_tmp[j] = logsub( gradient_log_tmp_neg[j], gradient_log_tmp_pos[j] );
00825             ad_gradient[j] = nd_gradient[j] + safeexp( gradient_log_tmp[j] - log_sum_p_ru);
00826         }
00827     }
00828 
00829 }
00830 
00832 // computeDiscriminantGradient
00835 void NnlmOutputLayer::computeDiscriminantGradient() const
00836 {
00837     gradient_log_tmp.fill(-REAL_MAX);
00838     gradient_log_tmp_pos.fill(-REAL_MAX);
00839     gradient_log_tmp_neg.fill(-REAL_MAX);
00840 
00841     // * Compute nd gradient
00842     computeNonDiscriminantGradient();
00843 
00844     // * Compute the discriminant-specific term (over the whole vocabulary)
00845     for( int u=0; u< target_cardinality; u++ )
00846     {
00847         addCandidateContribution( u );
00848     }
00849 
00850 
00851     // *** The corresponding discriminant gradient ***
00852     for(int j=0; j<input_size; j++) {
00853         if( gradient_log_tmp_pos[j] > gradient_log_tmp_neg[j] ) {
00854             gradient_log_tmp[j] = logsub( gradient_log_tmp_pos[j], gradient_log_tmp_neg[j] );
00855             fd_gradient[j] = nd_gradient[j] - safeexp( gradient_log_tmp[j] - log_sum_p_ru);
00856         } else  {
00857             gradient_log_tmp[j] = logsub( gradient_log_tmp_neg[j], gradient_log_tmp_pos[j] );
00858             fd_gradient[j] = nd_gradient[j] + safeexp( gradient_log_tmp[j] - log_sum_p_ru);
00859         }
00860     }
00861 
00862 //cout << "===nd_gradient " << nd_gradient << endl;
00863 //cout << "---fd_gradient " << fd_gradient << endl;
00864 
00865 }
00866 
00868 // addCandidateContribution
00870 void NnlmOutputLayer::addCandidateContribution( int c ) const
00871 {
00872     for(int i=0; i<input_size; i++) {
00873         if( beta(c,i) > 0)  {
00874             gradient_log_tmp_pos[i] = logadd( gradient_log_tmp_pos[i], 
00875                     vec_log_p_rg_t[c] + safelog( beta(c,i) ) +  safelog( pi[c] ) );
00876         } else  {
00877             gradient_log_tmp_neg[i] = logadd( gradient_log_tmp_neg[i], 
00878                     vec_log_p_rg_t[c] + safelog( -beta(c,i) ) +  safelog( pi[c] ) );
00879         }
00880 
00881         #ifdef BOUNDCHECK
00882         if( isnan(gradient_log_tmp_pos[i]) || isnan(gradient_log_tmp_neg[i]) ) {
00883           PLERROR("NnlmOutputLayer::addCandidateContribution - gradient_log_tmp_pos or gradient_log_tmp_neg is NAN.\n");
00884         }
00885         #endif
00886     }
00887 }
00888 
00889 
00900 /*void NnlmOutputLayer::bpropUpdate(const Vec& input, const Vec& output,
00901                                const Vec& output_gradient)
00902 {
00903 }*/
00904 
00905 
00906 
00909 void NnlmOutputLayer::bpropUpdate(const Vec& input, const Vec& output,
00910                                Vec& input_gradient,
00911                                const Vec& output_gradient)
00912 {
00913 
00914     int in_size = input.size();
00915     int out_size = output.size();
00916     int og_size = output_gradient.size();
00917 
00918     // *** Sanity checks
00919     if( in_size != input_size ) {
00920         PLERROR("NnlmOutputLayer::bpropUpdate:'input.size()' should be equal\n"
00921                 " to 'input_size' (%i != %i)\n", in_size, input_size);
00922     }  else if( out_size != output_size )  {
00923         PLERROR("NnlmOutputLayer::bpropUpdate:'output.size()' should be"
00924                 " equal\n"
00925                 " to 'output_size' (%i != %i)\n", out_size, output_size);
00926     }  else if( og_size != output_size )  {
00927         PLERROR("NnlmOutputLayer::bpropUpdate:'output_gradient.size()'"
00928                 " should\n"
00929                 " be equal to 'output_size' (%i != %i)\n",
00930                 og_size, output_size);
00931     }
00932 
00933     // *** Compute input_gradient ***
00934     // *** Compute input_gradient ***
00935 
00936     if( cost == COST_NON_DISCR ) {
00937         computeNonDiscriminantGradient();
00938         input_gradient << nd_gradient;
00939     }
00940     else if( cost == COST_APPROX_DISCR )  {
00941         computeApproxDiscriminantGradient();
00942         input_gradient << ad_gradient;
00943     }
00944 
00945     else if( cost == COST_DISCR )  {
00946         computeDiscriminantGradient();
00947         input_gradient << fd_gradient;
00948     }
00949     else  {
00950         PLERROR("NnlmOutputLayer::bpropUpdate - invalid cost\n");
00951     }
00952 
00953 //    cout << "NnlmOutputLayer::bpropUpdate -> input_gradient " << input_gradient << endl; 
00954 
00955     #ifdef BOUNDCHECK
00956     for(int i=0; i<input_size; i++) {
00957         if( isnan(input_gradient[i]) ) {
00958           PLERROR( "NnlmOutputLayer::bpropUpdate - isnan(input_gradient[i]) true.\n" );
00959         }
00960     }
00961     #endif
00962 
00963 
00964 
00965     // *** Discriminant learning of mu and sigma ***
00966     // *** Discriminant learning of mu and sigma ***
00967 
00968     if( learning == LEARNING_DISCRIMINANT )  {
00969         applyMuGradient();
00970         applySigmaGradient();
00971     }
00972     // *** Empirical learning of mu and sigma ***
00973     // *** Empirical learning of mu and sigma ***
00974 
00975     //if( learning == LEARNING_EMPIRICAL )  {
00976     //    applyMuAndSigmaEmpirical();
00977     //}
00978 
00979 
00980 }
00981 
00983 // applyMuAndSigmaEmpiricalUpdate
00985 // TODO: I tend to think this update should be done before computing the cost, in the fprop,
00986 // since this non-discriminant learning procedure does not require computation of the cost.
00987 // Each word is seen a different number of times in the train set.
00988 void NnlmOutputLayer::applyMuAndSigmaEmpiricalUpdate(const Vec& input) const
00989 {
00990     // *** Update counts *** (per-example counters once per example, sums per dimension)
00991     s_sumI++;
00992     sumI[ target ]++;
00993     for(int i=0; i<input_size; i++) {
00994         sumR( target, i ) += input[i];
00995         sumR2( target, i ) += input[i]*input[i];
00996 
00997         // ### for a global_sigma2
00998         global_sumR[i] += input[i];
00999         global_sumR2[i] += input[i]*input[i];
01000         // ### for a global_sigma2
01001 
01002     }
01003 
01004     // *** Intermediate values ***
01005     int n_ex_since_last_update = s_sumI - (int)el_last_update[target];
01006     Vec old_mu;
01007     old_mu << mu(target);
01008     el_last_update[target] = s_sumI;
01009 
01010 
01011     // *** Compute learning rate ***
01012     //real el_lr = el_start_learning_rate[target] / ( 1.0 + sumI[target] * el_decrease_constant[target] );
01013     //cout << "el_lr " << el_lr << endl;
01014 
01015     // *** Update mu ***
01016     for(int i=0; i<input_size; i++) {
01017         mu( target, i ) = sumR( target, i ) / sumI[ target ];
01018         //mu( target, i ) = (1.0-el_lr) * mu( target, i ) + el_lr * input[i];
01019 
01020         // ### for a global_sigma2
01021         global_mu[i] = global_sumR[i] / (real) s_sumI;
01022     }
01023 
01024     // *** Update sigma ***
01025     for(int i=0; i<input_size; i++) {
01026 
01027         // ### for a global_sigma2
01028         // Divide by (n-1) instead of n
01029         global_sigma2[i] = ( (real) s_sumI * global_mu[i] * global_mu[i] + 
01030                   global_sumR2[i] - 2.0 * global_mu[i] * global_sumR[i]  ) / (s_sumI - 1);
01031 
01032 /*        sigma2( target, i ) = (sumI[target]*mu(target, i)*mu(target, i) + sumR2(target,i) -2.0 * mu(target, i) * sumR(target, i) ) / 
01033                                 (sumI[target]-1);
01034 */
01035           sigma2( target, i ) = global_sigma2[i];
01036 
01037 
01038       // ### for a global_sigma2
01039 
01040 
01041         // Add a regularizer to compensate for the frequency at which the word is seen
01042         // TODO
01043         // old_mu
01044 
01045         // Enforce minimal sigma
01046         if(sigma2( target, i )<sigma2min) {
01047             cout << "<sigma2min!" << endl;
01048             sigma2( target, i ) = sigma2min;
01049         }
01050 
01051         if( isnan( sigma2( target, i ) ) ) {
01052           PLERROR( "NnlmOutputLayer::applyMuAndSigmaEmpiricalUpdate - isnan( sigma2( target, i ) )!\n" );
01053         }
01054     }
01055 
01056     // Update uniform mixture coefficient
01057     //sum_log_p_g_r = logadd( sum_log_p_g_r, log_p_g_r );
01058     //umc = safeexp( sum_log_p_g_r ) / s_sumI;
01059 }
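// Editorial note: the recomputation mu = sumR / sumI above is equivalent to the
// incremental form mu += (r - mu) / n after the n-th example of the word, since
// ((n-1)*mu_old + r) / n = mu_old + (r - mu_old) / n; the commented-out el_lr update
// is the same idea with a tunable rate in place of 1/n.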
01060 
01062 // applyMuGradient
01066 void NnlmOutputLayer::applyMuGradient() const
01067 {
01068     dl_lr = dl_start_learning_rate / ( 1.0 + dl_decrease_constant * step_number);
01069 
01070 
01071     if( cost == COST_NON_DISCR ) {
01072         Vec mu_gradient( input_size );
01073         mu_gradient << nd_gradient;
01074         for( int i=0; i<input_size; i++ ) {
01075             mu_gradient[i] = - mu_gradient[i];
01076             mu(the_real_target,i) -= dl_lr * mu_gradient[i];
01077         }
01078     }
01079 
01080 
01081     else if( cost == COST_APPROX_DISCR )  {
01082 
01083         // for the target
01084         applyMuTargetGradient();
01085 
01086         // --- for the others ---
01087         int c;
01088         // shared candidates
01089         for( int i=0; i< shared_candidates.length(); i++ )
01090         {
01091             c = shared_candidates[i];
01092             if( c != the_real_target )  {
01093                 applyMuCandidateGradient(c);
01094             }
01095         }
01096 
01097         // context candidates 
01098         for( int i=0; i< candidates[ context ].length(); i++ )
01099         {
01100             c = candidates[ context ][i];
01101             if( c != the_real_target )  {
01102                 applyMuCandidateGradient(c);
01103             }
01104         }
01105 
01106     }
01107 
01108 
01109     else if( cost == COST_DISCR )  {
01110         applyMuTargetGradient();
01111         for( int u=0; u< target_cardinality; u++ )  {
01112             if( u != the_real_target )  {
01113                 applyMuCandidateGradient(u);
01114             }
01115         }
01116 
01117 
01118 
01119     }
01120     else  {
01121         PLERROR("NnlmOutputLayer::applyMuGradient - invalid cost\n");
01122     }
01123 
01124 }
01126 // applyMuTargetGradient
01130 void NnlmOutputLayer::applyMuTargetGradient() const
01131 {
01132 //    Vec bill( input_size );
01133 
01134 
01135     Vec mu_gradient( input_size );
01136     mu_gradient << nd_gradient;
01137     for( int i=0; i<input_size; i++ ) {
01138         mu_gradient[i] = - mu_gradient[i];
01139 
01140         if( beta(the_real_target,i) > 0.0 ) {
01141             mu_gradient[i] += safeexp( 
01142                 safelog( pi[the_real_target] ) + vec_log_p_rg_t[the_real_target] + safelog( beta(the_real_target,i) ) - log_sum_p_ru );
01143         } else  {
01144             mu_gradient[i] -= safeexp( 
01145                 safelog( pi[the_real_target] ) + vec_log_p_rg_t[the_real_target] + safelog( -beta(the_real_target,i) ) - log_sum_p_ru );
01146         }
01147 
01148         mu(the_real_target,i) -= dl_lr * mu_gradient[i];
01149 
01150 //bill[i] = mu_gradient[i];
01151     }
01152 //cout << "MU target GRADIENT " << bill << endl;
01153 
01154 }
01156 // applyMuCandidateGradient
01160 void NnlmOutputLayer::applyMuCandidateGradient(int c) const
01161 {
01162 //    Vec bill( input_size );
01163 
01164     Vec mu_gradient(input_size);
01165 
01166     for( int i=0; i<input_size; i++ ) {
01167         if( beta(c,i) > 0.0 ) {
01168             mu_gradient[i] = safeexp( 
01169                 safelog( pi[c] ) + vec_log_p_rg_t[c] + safelog( beta(c,i) ) - log_sum_p_ru );
01170         } else  {
01171             mu_gradient[i] = - safeexp( 
01172                 safelog( pi[c] ) + vec_log_p_rg_t[c] + safelog( -beta(c,i) ) - log_sum_p_ru );
01173         }
01174         mu(c,i) -= dl_lr * mu_gradient[i];
01175 
01176 //bill[i] = - dl_lr * mu_gradient[i];
01177     }
01178 //cout << "MU candidate GRADIENT " << bill << endl;
01179 }
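// Editorial derivation sketch for the two mu updates above: since
//     d/dmu(t,i) [ log N(r; mu_t, sigma2_t) ] = (r_i - mu(t,i)) / sigma2(t,i) = beta(t,i),
// the target word receives -nd_gradient plus its share of the normalization term, while
// every other candidate c only receives the normalization term
//     pi[c] * p(r,g|c) * beta(c,i) / sum_u p(r,u),
// computed in the log domain with the sign of beta handled explicitly.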
01180 
01182 // applySigmaGradient
01184 void NnlmOutputLayer::applySigmaGradient() const
01185 {
01186     dl_lr = dl_start_learning_rate / ( 1.0 + dl_decrease_constant * step_number);
01187 
01188     Vec sigma2_gradient( input_size );
01189 
01190 
01191     if( cost == COST_NON_DISCR ) {
01192 
01193         real tmp = -0.5 * safeexp( vec_log_p_rg_t[ the_real_target ] - vec_log_p_r_t[ the_real_target ] );
01194 
01195         for( int i=0; i<input_size; i++ ) {
01196             sigma2_gradient[i] = tmp * ( beta(the_real_target,i) * beta(the_real_target,i) - 1.0/sigma2(the_real_target,i) );
01197             sigma2(the_real_target,i) -= dl_lr * sigma2_gradient[i];
01198         }
01199 
01200     }
01201 
01202 
01203     else if( cost == COST_APPROX_DISCR )  {
01204         applySigmaTargetGradient();
01205 
01206         // --- for the others ---
01207         int c;
01208         // shared candidates
01209         for( int i=0; i< shared_candidates.length(); i++ )
01210         {
01211             c = shared_candidates[i];
01212             if( c != the_real_target )  {
01213                 applySigmaCandidateGradient(c);
01214             }
01215         }
01216 
01217         // context candidates 
01218         for( int i=0; i< candidates[ context ].length(); i++ )
01219         {
01220             c = candidates[ context ][i];
01221             if( c != the_real_target )  {
01222                 applySigmaCandidateGradient(c);
01223             }
01224         }
01225 
01226     }
01227 
01228 
01229     else if( cost == COST_DISCR )  {
01230         applySigmaTargetGradient();
01231         for( int u=0; u< target_cardinality; u++ )  {
01232             if( u != the_real_target )  {
01233                 applySigmaCandidateGradient(u);
01234             }
01235         }
01236 
01237     }
01238     else  {
01239         PLERROR("NnlmOutputLayer::applySigmaGradient - invalid cost\n");
01240     }
01241 
01242 
01243 }
01244 void NnlmOutputLayer::applySigmaTargetGradient() const
01245 {
01246   //  Vec bob( input_size );
01247 
01248     Vec sigma2_gradient( input_size );
01249 
01250     real tmp = -0.5 * safeexp( vec_log_p_rg_t[ the_real_target ] - vec_log_p_r_t[ the_real_target ] );
01251     real tmp2 = 0.5 * pi[the_real_target] * safeexp( vec_log_p_rg_t[ the_real_target ] - log_sum_p_ru );
01252     real tmp3;
01253 
01254     for( int i=0; i<input_size; i++ ) {
01255         tmp3 = beta(the_real_target,i) * beta(the_real_target,i) - 1.0/sigma2(the_real_target,i);
01256         sigma2_gradient[i] = tmp * tmp3;
01257         sigma2_gradient[i] += tmp2 * tmp3;
01258         sigma2(the_real_target,i) -= dl_lr * sigma2_gradient[i];
01259 
01260         // Enforce minimal sigma
01261         if(sigma2( the_real_target, i )<sigma2min) {
01262             sigma2( the_real_target, i ) = sigma2min;
01263         }
01264 
01265 //bob[i] = sigma2_gradient[i];
01266     }
01267 //cout << "SIGMA target GRADIENT " << bob << endl;
01268 
01269 }
01270 
01271 
01272 void NnlmOutputLayer::applySigmaCandidateGradient(int c) const
01273 {
01274 //    Vec bob( input_size );
01275 
01276     Vec sigma2_gradient( input_size );
01277 
01278     real tmp2 = 0.5 * pi[c] * safeexp( vec_log_p_rg_t[ c ] - log_sum_p_ru );
01279     real tmp3;
01280 
01281     for( int i=0; i<input_size; i++ ) {
01282         tmp3 = beta(c,i) * beta(c,i) - 1.0/sigma2(c,i);
01283         sigma2_gradient[i] = tmp2 * tmp3;
01284         sigma2(c,i) -= dl_lr * sigma2_gradient[i];
01285 
01286         // Enforce minimal sigma
01287         if(sigma2( c, i )<sigma2min) {
01288             sigma2( c, i ) = sigma2min;
01289         }
01290 //bob[i] = - dl_lr * sigma2_gradient[i];
01291 
01292     }
01293 //cout << "SIGMA candidate GRADIENT " << bob << endl;
01294 }
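// Editorial derivation sketch for the sigma2 updates above: for a diagonal gaussian,
//     d/dsigma2(t,i) [ log N(r; mu_t, sigma2_t) ] = 0.5 * ( beta(t,i)^2 - 1/sigma2(t,i) ),
// which is the tmp3 factor; the target term is scaled by -p(r,g|t)/p(r|t) and each
// candidate's normalization term by pi[c] * p(r,g|c) / sum_u p(r,u), matching the
// tmp and tmp2 factors (which fold in the 0.5).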
01295 
01298 void NnlmOutputLayer::forget()
01299 {
01300     cout << "NnlmOutputLayer::forget()" << endl;
01301     resetParameters();
01302 }
01303 
01304 /* THIS METHOD IS OPTIONAL
01309 void NnlmOutputLayer::finalize()
01310 {
01311 }
01312 */
01313 
01314 /* THIS METHOD IS OPTIONAL
01317 bool NnlmOutputLayer::bpropDoesNothing()
01318 {
01319 }
01320 */
01321 
01322 
01332 /*void NnlmOutputLayer::bbpropUpdate(const Vec& input, const Vec& output,
01333                                 const Vec& output_gradient,
01334                                 const Vec& output_diag_hessian)
01335 {
01336 }*/
01337 
01338 
01339 /* THIS METHOD IS OPTIONAL
01346 void NnlmOutputLayer::bbpropUpdate(const Vec& input, const Vec& output,
01347                                 Vec& input_gradient,
01348                                 const Vec& output_gradient,
01349                                 Vec& input_diag_hessian,
01350                                 const Vec& output_diag_hessian)
01351 {
01352 }
01353 */
01354 
01355 
01356 
01357 } // end of namespace PLearn
01358 
01359 
01360 /*
01361   Local Variables:
01362   mode:c++
01363   c-basic-offset:4
01364   c-file-style:"stroustrup"
01365   c-file-offsets:((innamespace . 0)(inline-open . 0))
01366   indent-tabs-mode:nil
01367   fill-column:79
01368   End:
01369 */
01370 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :