PLearn 0.1
LayerCostModule.cc
00001 // -*- C++ -*-
00002 
00003 // LayerCostModule.cc
00004 //
00005 // Copyright (C) 2007 Jerome Louradour
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Author: Jerome Louradour
00036 
00041 #include "LayerCostModule.h"
00042 
00043 namespace PLearn {
00044 using namespace std;
00045 
00046 PLEARN_IMPLEMENT_OBJECT(
00047     LayerCostModule,
00048     "Computes a cost function on Layer given its outputs only, and Back-propagates the gradient.\n",
00049     "The input port of this Module must be connected to:\n"
00050     "- Expectations of a RBM hidden layer (e.g. in a DBN), or\n"
00051     "- Activations of a layer (in a Neural Net), or\n"
00052     "- Real outputs of any layer.\n"
00053     "Based on these values, several cost functions can be chosen.\n"
00054     "Be careful: some are valid only for binomial layers. \n");
00055 
00056 LayerCostModule::LayerCostModule():
00057     cost_function("correlation"),
00058     nstages_max(-1),
00059     momentum(0.),
00060     optimization_strategy("standard"),
00061     alpha(0.),
00062     histo_size(10),
00063     penalty_function("square"),
00064     cost_function_completename(""),
00065     stage(0),
00066     bprop_all_terms(true),
00067     random_index_during_bprop(false),
00068     average_deriv(0.)
00069 {
00070     output_size = 1;
00071 }
00072 
00073 void LayerCostModule::declareOptions(OptionList& ol)
00074 {
00075     // Now call the parent class' declareOptions
00076     inherited::declareOptions(ol);
00077 
00078     declareOption(ol, "cost_function", &LayerCostModule::cost_function,
00079                   OptionBase::buildoption,
00080         "The cost function applied to the layer:\n"
00081         "- \"pascal\" :"
00082         " Pascal Vincent's God given cost function.\n"
00083         "- \"correlation\":"
00084         " average of a function applied to the correlations between outputs.\n"
00085         "- \"kl_div\":"
00086         " KL divergence between distrubution of outputs (sampled with x)\n"
00087         "- \"kl_div_simple\":"
00088         " simple version of kl_div where we count at least one sample per histogram's bin\n"
00089         "- \"stochastic_cross_entropy\" [default]:"
00090         " average cross-entropy between pairs of binomial units\n"
00091         "- \"stochastic_kl_div\":"
00092         " average KL divergence between pairs of binomial units\n"
00093         );
00094 
00095     declareOption(ol, "nstages_max", &LayerCostModule::nstages_max,
00096                   OptionBase::buildoption,
00097         "Maximal number of updates for which the gradient of the cost function will be propagated.\n"
00098         "-1 means: always train without limit.\n"
00099         );
00100 
00101     declareOption(ol, "optimization_strategy", &LayerCostModule::optimization_strategy,
00102                   OptionBase::buildoption,
00103         "Strategy to compute the gradient:\n"
00104         "- \"standard\": standard computation\n"
00105         "- \"half\": we will propagate the gradient only on units tagged as i < j.\n"
00106         "- \"random_half\": idem than 'half' with the order of the indices that changes randomly during training.\n"
00107         );
00108 
00109     declareOption(ol, "momentum", &LayerCostModule::momentum,
00110                   OptionBase::buildoption,
00111         "(in [0,1[) For non stochastic cost functions, momentum to compute the moving means.\n"
00112         );
00113 
00114     declareOption(ol, "histo_size", &LayerCostModule::histo_size,
00115                   OptionBase::buildoption,
00116         "For \"kl_div\" cost functions,\n"
00117         "number of bins for the histograms (to estimate distributions of outputs).\n"
00118         "The higher is histo_size, the more precise is the estimation.\n"
00119         );
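    // Added note (assuming the histograms map an output q in [0,1] to the bin of
    // width HISTO_STEP = 1/histo_size that contains it): with histo_size = 10,
    // HISTO_STEP = 0.1, so an output q = 0.37 falls in the 4th bin [0.3, 0.4),
    // and each observed sample adds one_count = 1/n_samples to that bin of the
    // unit's histogram.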
00120 
00121     declareOption(ol, "alpha", &LayerCostModule::alpha,
00122                   OptionBase::buildoption,
00123         "(>=0) For \"pascal\" cost function,\n"
00124         "number of bins for the histograms (to estimate distributions of outputs).\n"
00125         "The higher is histo_size, the more precise is the estimation.\n"
00126         );
00127 
00128     declareOption(ol, "penalty_function", &LayerCostModule::penalty_function,
00129                   OptionBase::buildoption,
00130                   "(For non-stochastic cost functions)\n"
00131                   "Function applied to the local cost between two inputs to compute\n"
00132                   "the global cost on the whole set of inputs (by averaging).\n"
00133                   "- \"square\": f(x)= x^2      \n"
00134                   "- \"log\":    f(x)= -log( 1 - x) \n"
00135                   "- \"exp\":    f(x)= exp( x )     \n"
00136                   "- \"linear\": f(x)= x       \n"
00137         );
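    // Worked example (added for illustration): for a pairwise statistic x = 0.5
    // (e.g. the correlation between two units), the penalty terms are
    //   "square": f(0.5) = 0.25
    //   "log":    f(0.5) = -log(1 - 0.5) ~= 0.693
    //   "exp":    f(0.5) = exp(0.5)      ~= 1.649
    //   "linear": f(0.5) = 0.5
    // (see func_() and deriv_func_() below, which also accept "pow4").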
00138 
00139     declareOption(ol, "cost_function_completename", &LayerCostModule::cost_function_completename,
00140                   OptionBase::learntoption,
00141                   "complete name of cost_function (take into account some internal settings).\n"
00142         );
00143 
00144     declareOption(ol, "stage", &LayerCostModule::stage,
00145                   OptionBase::learntoption,
00146                   "number of stages that has been done during the training.\n"
00147         );
00148 
00149     declareOption(ol, "inputs_expectation_trainMemory", &LayerCostModule::inputs_expectation_trainMemory,
00150                   OptionBase::nosave,
00151                   "Correlation of the outputs, for all pairs of units.\n"
00152         );
00153 
00154     declareOption(ol, "inputs_cross_quadratic_mean_trainMemory", &LayerCostModule::inputs_cross_quadratic_mean_trainMemory,
00155                   OptionBase::nosave,
00156                   "Expectation of the cross products between outputs, for all pairs of units.\n"
00157         );
00158 }
00159 
00160 void LayerCostModule::build_()
00161 {
00162     PLASSERT( histo_size > 1 );
00163     PLASSERT( momentum >= 0.);
00164     PLASSERT( momentum < 1.);
00165 
00166     if( input_size > 1 )
00167         norm_factor = 1./(real)(input_size*(input_size-1));
00168 
00169     optimization_strategy = lowerstring( optimization_strategy );
00170     if( optimization_strategy == "" )
00171         optimization_strategy = "standard";
00172     if ( optimization_strategy == "half" )
00173          bprop_all_terms = false;
00174     else if ( optimization_strategy == "random_half" )
00175     {
00176          bprop_all_terms = false;
00177          random_index_during_bprop = true;
00178     }
00179     else if ( optimization_strategy != "standard" )
00180          PLERROR( "LayerCostModule::build() does not recognize "
00181                   "optimization_strategy '%s'", optimization_strategy.c_str() );
00182 
00183     cost_function = lowerstring( cost_function );
00184     // choose HERE the *default* cost function
00185     if( cost_function == "" )
00186         cost_function = "pascal";
00187     if( ( cost_function_completename == "" ) || !string_ends_with(cost_function_completename, cost_function) )
00188         cost_function_completename = string(cost_function);
00189 
00190      // list HERE all *stochastic* cost functions
00191     if( ( cost_function == "stochastic_cross_entropy" )
00192      || ( cost_function == "stochastic_kl_div" ) )
00193         is_cost_function_stochastic = true;
00194 
00195     // list HERE all *non stochastic* cost functions
00196     // and the specific initialization
00197     else if( ( cost_function == "kl_div" )
00198           || ( cost_function == "kl_div_simple" ) )
00199     {
00200         is_cost_function_stochastic = false;
00201         if( input_size > 0 )
00202             inputs_histo.resize(input_size,histo_size);
00203         HISTO_STEP = 1.0/(real)histo_size;
00204 
00205         if( cost_function == "kl_div" )
00206         {
00207             cache_differ_count_i.resize(input_size);
00208             cache_differ_count_j.resize(input_size);
00209             cache_n_differ.resize(input_size);
00210             for( int i = 0; i < input_size; i ++)
00211             {
00212                 cache_differ_count_i[i].resize(i);
00213                 cache_differ_count_j[i].resize(i);
00214                 cache_n_differ[i].resize(i);
00215                 for( int j = 0; j < i; j ++)
00216                 {
00217                     cache_differ_count_i[i][j].resize(histo_size);
00218                     cache_differ_count_j[i][j].resize(histo_size);
00219                     cache_n_differ[i][j].resize(histo_size);
00220                 }
00221             }
00222         }
00223     }
00224     else if( ( cost_function == "pascal" )
00225           || ( cost_function == "correlation" ) )
00226     {
00227         is_cost_function_stochastic = false;
00228         if( ( input_size > 0 ) && (momentum > 0.0) )
00229         {
00230             inputs_expectation_trainMemory.resize(input_size);
00231             inputs_cross_quadratic_mean_trainMemory.resize(input_size,input_size);
00232         }
00233         cost_function_completename = addprepostfix( penalty_function, "_", cost_function );
00234         LINEAR_FUNC = false;
00235         SQUARE_FUNC = false;
00236         POW4_FUNC = false;
00237         EXP_FUNC = false;
00238         LOG_FUNC = false;
00239         penalty_function = lowerstring( penalty_function );
00240         if( penalty_function == "linear" )
00241             LINEAR_FUNC = true;
00242         else if( penalty_function == "square" )
00243             SQUARE_FUNC = true;
00244         else if( penalty_function == "pow4" )
00245             POW4_FUNC = true;
00246         else if( penalty_function == "exp" )
00247             EXP_FUNC = true;
00248         else if( penalty_function == "log" )
00249             LOG_FUNC = true;
00250         else
00251             PLERROR("LayerCostModule::build_() does not recognize penalty function '%s'",
00252                     penalty_function.c_str());
00253     }
00254     else
00255         PLERROR("LayerCostModule::build_() does not recognize cost function '%s'",
00256                  cost_function.c_str());
00257 
00258     // The port story...
00259     ports.resize(0);
00260     portname_to_index.clear();
00261     addPortName("input");
00262     addPortName("cost");
00263 
00264     port_sizes.resize(nPorts(), 2);
00265     port_sizes.fill(-1);
00266     port_sizes(getPortIndex("input"), 1) = input_size;
00267     port_sizes(getPortIndex("cost"), 1) = 1;
00268 }
00269 
00270 void LayerCostModule::build()
00271 {
00272     inherited::build();
00273     build_();
00274 }
00275 
00276 void LayerCostModule::forget()
00277 {
00278     inputs_histo.clear();
00279 
00280     inputs_expectation.clear();
00281     inputs_stds.clear();
00282 
00283     inputs_correlations.clear();
00284     inputs_cross_quadratic_mean.clear();
00285     if( momentum > 0.0)
00286     {
00287         inputs_expectation_trainMemory.clear();
00288         inputs_cross_quadratic_mean_trainMemory.clear();
00289     }
00290     one_count = 0.;
00291     stage = 0;
00292     average_deriv = 0.;
00293 }
00294 
00295 void LayerCostModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00296 {
00297     inherited::makeDeepCopyFromShallowCopy(copies);
00298 
00299     deepCopyField(inputs_histo, copies);
00300 
00301     deepCopyField(inputs_expectation, copies);
00302     deepCopyField(inputs_stds, copies);
00303 
00304     deepCopyField(inputs_correlations, copies);
00305     deepCopyField(inputs_cross_quadratic_mean, copies);
00306 
00307     deepCopyField(inputs_expectation_trainMemory, copies);
00308     deepCopyField(inputs_cross_quadratic_mean_trainMemory, copies);
00309 
00310     deepCopyField(cache_differ_count_i, copies);
00311     deepCopyField(cache_differ_count_j, copies);
00312     deepCopyField(cache_n_differ, copies);
00313 
00314     deepCopyField(ports, copies);
00315 }
00316 
00317 
00319 // fprop //
00321 
00322 
00323 void LayerCostModule::fprop(const TVec<Mat*>& ports_value)
00324 {
00325     Mat* p_inputs = ports_value[getPortIndex("input")];
00326     Mat* p_costs = ports_value[getPortIndex("cost")];
00327 
00328 
00329     PLASSERT( ports_value.length() == nPorts() );
00330 
00331     if ( p_costs && p_costs->isEmpty() )
00332     {
00333         PLASSERT( p_inputs && !p_inputs->isEmpty() );
00334         //cout << "fprop" << endl;
00335         fprop(*p_inputs, *p_costs);
00336     }
00337 }
00338 
00339 void LayerCostModule::fprop(const Mat& inputs, const Mat& targets, Mat& costs) const
00340 {
00341     fprop( inputs, costs );
00342 }
00343 
00344 void LayerCostModule::fprop(const Mat& inputs, Mat& costs) const
00345 {
00346     PLASSERT( input_size > 1 );
00347     int n_samples = inputs.length();
00348     costs.resize( n_samples, output_size );
00349 
00350     // During training, the cost is not computed here (only the computations needed for bprop are done)
00351     if( during_training )
00352     {
00353         costs.fill( MISSING_VALUE );
00354         return;
00355     }
00356     else
00357         costs.clear();
00358 
00359     if( !is_cost_function_stochastic )
00360     {
00361         PLASSERT( inputs.width() == input_size );
00362 
00363         if( cost_function == "kl_div" )
00364         {
00389 
00390 
00391             Mat histo;
00392             computeHisto( inputs, histo );
00393             costs(0,0) = computeKLdiv( histo );
00394         }
00395         else if( cost_function == "kl_div_simple" )
00396         {
00405 
00406             Mat histo;
00407             computeSafeHisto( inputs, histo );
00408 
00409             // Computing the KL divergence
00410             for (int i = 0; i < input_size; i++)
00411                 for (int j = 0; j < i; j++)
00412                     for (int k = 0; k < histo_size; k++)
00413                         costs(0,0) += KLdivTerm( histo(i,k), histo(j,k));
00414 
00415             // Normalization w.r.t. number of units
00416             costs(0,0) *= norm_factor;
00417         }
00418         else if( cost_function == "pascal" )
00419         {
00431 
00432             Vec expectation;
00433             Mat cross_quadratic_mean;
00434             computePascalStatistics( inputs, expectation, cross_quadratic_mean );
00435 
00436             // Computing the cost
00437             for (int i = 0; i < input_size; i++)
00438             {
00439                 if (alpha > 0.0 )
00440                     costs(0,0) -= alpha * func_( expectation[i] ) *(real)(input_size-1);
00441                 for (int j = 0; j < i; j++)
00442                     costs(0,0) += func_( cross_quadratic_mean(i,j) );
00443             }
00444             costs(0,0) *= norm_factor;
00445         }
00446         else if( cost_function == "correlation" )
00447         {
00462 
00463             Vec expectation;
00464             Mat cross_quadratic_mean;
00465             Vec stds;
00466             Mat correlations;
00467             computeCorrelationStatistics( inputs, expectation, cross_quadratic_mean, stds, correlations );
00468 
00469             // Computing the cost
00470             for (int i = 0; i < input_size; i++)
00471                 for (int j = 0; j < i; j++)
00472                     costs(0,0) += func_( correlations(i,j) );
00473 
00474             costs(0,0) *= norm_factor;
00475         }
00476     }
00477     else // stochastic cost function
00478         for (int isample = 0; isample < n_samples; isample++)
00479             fprop(inputs(isample), costs(isample,0));
00480 }
00481 
00482 void LayerCostModule::fprop(const Vec& input, real& cost) const
00483 {
00484     PLASSERT( input.size() == input_size );
00485     PLASSERT( is_cost_function_stochastic );
00486 
00487     cost = 0.0;
00488     real  qi, qj, comp_qi, comp_qj; // The outputs (units i,j)
00489                                     // and some basic operations on it (e.g.: 1-qi, qi/(1-qi))
00490 
00491     if( cost_function == "stochastic_cross_entropy" )
00492     {
00507 
00508         for( int i = 0; i < input_size; i++ )
00509         {
00510            qi = input[i];
00511            comp_qi = 1.0 - qi;
00512            for( int j = 0; j < i; j++ )
00513            {
00514                qj = input[j];
00515                comp_qj = 1.0 - qj;
00516 
00517                // H(pi||pj) = H(pi) + D_{KL}(pi||pj)
00518                cost += qi*safeflog(qj) + comp_qi*safeflog(comp_qj);
00519 
00520                // The symmetric part (loop  j=i+1...size)
00521                cost += qj*safeflog(qi) + comp_qj*safeflog(comp_qi);
00522            }
00523         }
00524         // Normalization w.r.t. number of units
00525         cost *= norm_factor;
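        // Added summary: for outputs q_1..q_N of the layer, the value computed above is
        //   cost = 1/(N(N-1)) * sum_{i != j} [ q_i*log(q_j) + (1-q_i)*log(1-q_j) ]
        // i.e. minus the average pairwise cross-entropy H(p_i||p_j) between the
        // Bernoulli distributions p_i = (q_i, 1-q_i).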
00526     }
00527 
00528     else if( cost_function == "stochastic_kl_div" )
00529     {
00544 
00545         for( int i = 0; i < input_size; i++ )
00546         {
00547            qi = input[i];
00548            if(fast_exact_is_equal(qi, 1.0))
00549                comp_qi = REAL_MAX;
00550            else
00551                comp_qi = qi/(1.0 - qi);
00552 
00553            for( int j = 0; j < i; j++ )
00554            {
00555                qj = input[j];
00556                if(fast_exact_is_equal(qj, 1.0))
00557                    comp_qj = REAL_MAX;
00558                else
00559                    comp_qj = qj/(1.0 - qj);
00560 
00561                //     - D_{KL}(pi||pj) - D_{KL}(pj||pi)
00562                cost += (qj-qi)*safeflog(comp_qi/comp_qj);
00563            }
00564         }
00565         // Normalization w.r.t. number of units
00566         cost *= norm_factor;
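        // Added summary: with r_i = q_i/(1-q_i), the value computed above is
        //   cost = 1/(N(N-1)) * sum_{i<j} (q_j - q_i) * log( r_i / r_j )
        //        = - 1/(N(N-1)) * sum_{i<j} [ D_KL(p_i||p_j) + D_KL(p_j||p_i) ]
        // i.e. minus the (normalized) symmetrized KL divergence between the
        // Bernoulli distributions p_i = (q_i, 1-q_i).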
00567     }
00568 
00569     else
00570         PLERROR("LayerCostModule::fprop() not implemented for cost_cfunction '%s'\n"
00571                 "- It may be a printing error.\n"
00572                 "- You can try to call LayerCostModule::fprop(const Mat& inputs, Mat& costs)"
00573                 "  if your cost function is non stochastic.\n"
00574                 "- Or else write the code corresponding to your cost function.\n",
00575                  cost_function.c_str());
00576 }
00577 
00578 
00579 
00580 
00582 // bpropUpdate //
00584 
00585 
00586 void LayerCostModule::bpropUpdate(const Mat& inputs,
00587                                   const Mat& targets,
00588                                   const Vec& costs,
00589                                   Mat& inputs_grad, bool accumulate)
00590 {
00591     bpropUpdate( inputs, inputs_grad);
00592 }
00593 
00594 void LayerCostModule::bpropAccUpdate(const TVec<Mat*>& ports_value,
00595                                      const TVec<Mat*>& ports_gradient)
00596 {
00597     PLASSERT( input_size > 1 );
00598     PLASSERT( ports_value.length() == nPorts() );
00599     PLASSERT( ports_gradient.length() == nPorts() );
00600 
00601     const Mat* p_inputs = ports_value[getPortIndex("input")];
00602     Mat* p_inputs_grad = ports_gradient[getPortIndex("input")];
00603     Mat* p_cost_grad = ports_gradient[getPortIndex("cost")];
00604 
00605     if( p_inputs_grad && p_inputs_grad->isEmpty()
00606         && p_cost_grad && !p_cost_grad->isEmpty() )
00607     {
00608         PLASSERT( p_inputs && !p_inputs->isEmpty());
00609         int n_samples = p_inputs->length();
00610         PLASSERT( p_cost_grad->length() == n_samples );
00611         PLASSERT( p_cost_grad->width() == 1 );
00612 
00613         bpropUpdate( *p_inputs, *p_inputs_grad);
00614 
00615         for( int isample = 0; isample < n_samples; isample++ )
00616             for( int i = 0; i < input_size; i++ )
00617                 (*p_inputs_grad)(isample, i) *= (*p_cost_grad)(isample,0);
00618 
00619         checkProp(ports_gradient);
00620     }
00621     else if( !p_inputs_grad && !p_cost_grad )
00622         return;
00623     else
00624         PLERROR("In LayerCostModule::bpropAccUpdate - Port configuration not implemented.");
00625 
00626 }
00627 
00631 void LayerCostModule::bpropUpdate(const Mat& inputs,
00632                                   Mat& inputs_grad)
00633 {
00634     if( random_index_during_bprop )
00635         PLERROR("LayerCostModule::bpropUpdate with random_index_during_bprop not implemented yet.");
00636 
00637     PLASSERT( inputs.width() == input_size );
00638 
00639     int n_samples = inputs.length();
00640     inputs_grad.resize(n_samples, input_size);
00641     inputs_grad.clear();
00644 
00645     stage += n_samples;
00646     if( (nstages_max>0) && (stage > nstages_max) )
00647         return;
00648 
00649     //cout << "bpropAccUpdate" << endl;
00650 
00651     if( cost_function == "stochastic_cross_entropy" )
00652     {
00653         for (int isample = 0; isample < n_samples; isample++)
00654         {
00655             real qi, qj, comp_qi, comp_qj;
00656             Vec comp_q(input_size), log_term(input_size);
00657             for (int i = 0 ; i < input_size ; i++ )
00658             {
00659                 qi = inputs(isample,i);
00660                 comp_qi = 1.0 - qi;
00661                 comp_q[i] = comp_qi;
00662                 log_term[i] = safeflog(qi) - safeflog(comp_qi);
00663             }
00664             for (int i = 0; i < input_size; i++ )
00665             {
00666                 qi = inputs(isample,i);
00667                 comp_qi = comp_q[i];
00668                 for (int j = 0; j < i; j++ )
00669                 {
00670                     qj = inputs(isample,j);
00671                     comp_qj=comp_q[j];
00672                     // log(pj) - log(1-pj) + pj/pi - (1-pj)/(1-pi)
00673                     inputs_grad(isample,i) += log_term[j] + qj/qi - comp_qj/comp_qi;
00674                     // The symmetric part (loop  j=i+1...input_size)
00675                     if( bprop_all_terms )
00676                         inputs_grad(isample,j) += log_term[i] + qi/qj - comp_qi/comp_qj;
00677                 }
00678             }
00679             for (int i = 0; i < input_size; i++ )
00680                 inputs_grad(isample, i) *= norm_factor;
00681         }
00682     } // END cost_function == "stochastic_cross_entropy"
00683 
00684     else if( cost_function == "stochastic_kl_div" )
00685     {
00686         for (int isample = 0; isample < n_samples; isample++)
00687         {
00688             real qi, qj, comp_qi, comp_qj;
00689             Vec comp_q(input_size), log_term(input_size);
00690             for (int i = 0; i < input_size; i++ )
00691             {
00692                 qi = inputs(isample,i);
00693                 comp_qi = 1.0 - qi;
00694                 if(fast_exact_is_equal(qi, 1.0) || fast_exact_is_equal(qi, 0.0))
00695                     comp_q[i] = REAL_MAX;
00696                 else
00697                     comp_q[i] = 1.0/(qi*comp_qi);
00698                 log_term[i] = safeflog(qi) - safeflog(comp_qi);
00699             }
00700             for (int i = 0; i < input_size; i++ )
00701             {
00702                 qi = inputs(isample,i);
00703                 comp_qi = comp_q[i];
00704 
00705                 for (int j = 0; j < i ; j++ )
00706                 {
00707                     qj = inputs(isample,j);
00708                     comp_qj=comp_q[j];
00709                     //   [qj - qi]/[qi (1-qi)] - log[ qi/(1-qi) * (1-qj)/qj]
00710                     inputs_grad(isample,i) += (qj - qi)*comp_qi - log_term[i] + log_term[j];
00711                     // The symmetric part (loop  j=i+1...input_size)
00712                     if( bprop_all_terms )
00713                         inputs_grad(isample,j) += (qi - qj)*comp_qj - log_term[j] + log_term[i];
00714                 }
00715             }
00716             for (int i = 0; i < input_size; i++ )
00717                 inputs_grad(isample, i) *= norm_factor;
00718         }
00719     } // END cost_function == "stochastic_kl_div"
00720 
00721     else if( cost_function == "kl_div" )
00722     {
00723         computeHisto(inputs);
00724         real cost_before = computeKLdiv( true );
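        // Added note: for this cost, the gradient w.r.t. each output q_i is
        // estimated below by a finite difference on the histogram-based KL cost:
        //   d(cost)/d(q_i) ~= [ cost(q_i + dq(q_i)) - cost(q_i) ] / dq(q_i)
        // where dq(q_i) shifts q_i into the next histogram bin.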
00725 
00726         if( !bprop_all_terms )
00727             PLERROR("kl_div with bprop_all_terms=false not implemented yet");
00728 
00729         for (int isample = 0; isample < n_samples; isample++)
00730         {
00731             real qi, qj;
00732             // Computing the difference of KL divergence
00733             // for d_q
00734             for (int i = 0; i < input_size; i++)
00735             {
00736                 qi=inputs(isample,i);
00737                 if( histo_index(qi) < histo_size-1 )
00738                 {
00739                     inputs(isample,i) += dq(qi);
00740                     computeHisto(inputs);
00741                     real cost_after = computeKLdiv( false );
00742                     inputs(isample,i) -= dq(qi);
00743                     inputs_grad(isample, i) = (cost_after - cost_before)*1./dq(qi);
00744                 }
00745                 //else inputs_grad(isample, i) = 0.;
00746 
00747                 continue;
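                // Added note: the incremental update below is unreachable because of
                // the 'continue' above; the finite-difference estimate computed in the
                // block above is the one actually used.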
00748 
00749                 inputs_grad(isample, i) = 0.;
00750 
00751                 qi = inputs(isample,i);
00752                 int index_i = histo_index(qi);
00753                 if( ( index_i == histo_size-1 ) ) // we do not care about this...
00754                     continue;
00755                 real over_dqi=1.0/dq(qi);
00756                 // qi + dq(qi) ==> | p_inputs_histo(i,index_i)   - one_count
00757                 //                 \ p_inputs_histo(i,index_i+shift_i) + one_count
00758 
00759                 for (int j = 0; j < i; j++)
00760                 {
00761                     inputs_grad(isample, i) += delta_KLdivTerm(i, j, index_i, over_dqi);
00762 
00763                     qj = inputs(isample,j);
00764                     int index_j = histo_index(qj);
00765                     if( ( index_j == histo_size-1 ) )
00766                         continue;
00767                     real over_dqj=1.0/dq(qj);
00768                     // qj + dq(qj) ==> | p_inputs_histo(j,index_j)   - one_count
00769                     //                 \ p_inputs_histo(j,index_j+shift_j) + one_count
00770 
00771                     inputs_grad(isample, j) += delta_KLdivTerm(j, i, index_j, over_dqj);
00772                 }
00773             }
00774         }
00775     } // END cost_function == "kl_div"
00776 
00777     else if( cost_function == "kl_div_simple" )
00778     {
00779         computeSafeHisto(inputs);
00780 
00781         for (int isample = 0; isample < n_samples; isample++)
00782         {
00783             // Computing the difference of KL divergence
00784             // for d_q
00785             real qi, qj;
00786             for (int i = 0; i < input_size; i++)
00787             {
00788                 inputs_grad(isample, i) = 0.0;
00789 
00790                 qi = inputs(isample,i);
00791                 int index_i = histo_index(qi);
00792                 if( ( index_i == histo_size-1 ) ) // we do not care about this...
00793                     continue;
00794                 real over_dqi=1.0/dq(qi);
00795                 // qi + dq(qi) ==> | p_inputs_histo(i,index_i)   - one_count
00796                 //                 \ p_inputs_histo(i,index_i+shift_i) + one_count
00797 
00798                 for (int j = 0; j < i; j++)
00799                 {
00800                     inputs_grad(isample, i) += delta_SafeKLdivTerm(i, j, index_i, over_dqi);
00801 
00802                     if( bprop_all_terms )
00803                     {
00804                         qj = inputs(isample,j);
00805                         int index_j = histo_index(qj);
00806                         if( ( index_j == histo_size-1 ) || ( index_j == 0 ) )
00807                             continue;
00808                         real over_dqj=1.0/dq(qj);
00809                         // qj + dq(qj) ==> | p_inputs_histo(j,index_j)   - one_count
00810                         //                 \ p_inputs_histo(j,index_j+shift_j) + one_count
00811 
00812                         inputs_grad(isample, j) += delta_SafeKLdivTerm(j, i, index_j, over_dqj);
00813                     }
00814                 }
00815             }
00816 
00817             // Normalization
00818             for (int i = 0; i < input_size; i++ )
00819                 inputs_grad(isample, i) *= norm_factor;
00820         }
00821     } // END cost_function == "kl_div simple"
00822 
00823     else if( cost_function == "pascal" )
00824     {
00825         computePascalStatistics( inputs );
00826 
00827         for (int isample = 0; isample < n_samples; isample++)
00828         {
00829             real qi, qj;
00830             for (int i = 0; i < input_size; i++)
00831             {
00832                 qi = inputs(isample, i);
00833                 if (alpha > 0.0 )
00834                     inputs_grad(isample, i) -= alpha*deriv_func_(inputs_expectation[i])
00835                                                     *(real)(input_size-1);
00836                 for (int j = 0; j < i; j++)
00837                 {
00838                     real d_temp = deriv_func_(inputs_cross_quadratic_mean(i,j));
00839                     qj = inputs(isample,j);
00840                     inputs_grad(isample, i) += d_temp *qj;
00841                     if( bprop_all_terms )
00842                         inputs_grad(isample, j) += d_temp *qi;
00843                 }
00844             }
00845             for (int i = 0; i < input_size; i++)
00846                 inputs_grad(isample, i) *= norm_factor * (1.-momentum);
00847         }
00848     } // END cost_function == "pascal"
00849 
00850     else if( cost_function == "correlation")
00851     {
00852         computeCorrelationStatistics( inputs );
00853 
00854         real average_deriv_tmp = 0.;
00855         for (int isample = 0; isample < n_samples; isample++)
00856         {
00857             real qi, qj;
00858             Vec dSTDi_dqi( input_size ), dCROSSij_dqj( input_size );
00859             for (int i = 0; i < input_size; i++)
00860             {
00861                 if( fast_exact_is_equal( inputs_stds[i], 0. ) )
00862                 {
00863                     if( isample == 0 )
00864                         PLWARNING("wired phenomenon: the %dth output have always expectation %f ( at stage=%d )",
00865                                    i, inputs_expectation[i], stage);
00866                     if( inputs_expectation[i] < 0.1 )
00867                     {
00868                         // We force the neuron to switch on
00869                         // (the cost increases a lot when the expectation decreases towards 0)
00870                         if( ( isample > 0 ) || ( n_samples == 1 ) )
00871                              inputs_grad(isample, i) -= average_deriv;
00872                     }
00873                     else if( inputs_expectation[i] > 0.9 )
00874                     {
00875                         // We force the neuron to switch off
00876                         // (the cost increases a lot when the expectation increases towards 1)
00877                         // except for the first sample
00878                         if( ( isample > 0 ) || ( n_samples == 1 ) )
00879                             inputs_grad(isample, i) += average_deriv;
00880                     }
00881                     else
00882                         if ( !(inputs_expectation[i]>-REAL_MAX) || !(inputs_expectation[i]<REAL_MAX)  )
00883                            PLERROR("The %dth output have always value %f ( at stage=%d )",
00884                                     i, inputs_expectation[i], stage);
00885                     continue;
00886                 }
00896 
00897                 qi = inputs(isample, i);
00898                 dCROSSij_dqj[i] = ( qi - inputs_expectation[i] ); //*one_count;
00899                 dSTDi_dqi[i] = dCROSSij_dqj[i] / inputs_stds[i];
00900 
00901                 for (int j = 0; j < i; j++)
00902                 {
00903                     if( fast_exact_is_equal( inputs_correlations(i,j), 0.) )
00904                     {
00905                         if (isample == 0)
00906                             PLWARNING("correlation(i,j)=0 for i=%d, j=%d", i, j);
00907                         continue;
00908                     }
00909                     qj = inputs(isample,j);
00910                     real correlation_denum = inputs_stds[i]*inputs_stds[j];
00911                     real squared_correlation_denum = correlation_denum * correlation_denum;
00912                     if( fast_exact_is_equal( squared_correlation_denum, 0. ) )
00913                         continue;
00914                     real dfunc_dCorr = deriv_func_( inputs_correlations(i,j) );
00915                     real correlation_num = ( inputs_cross_quadratic_mean(i,j)
00916                                              - inputs_expectation[i]*inputs_expectation[j] );
00917 
00918                     if( correlation_num/correlation_denum - inputs_correlations(i,j) > 0.0000001 )
00919                         PLERROR( "num/denum (%f) <> correlation (%f) for (i,j)=(%d,%d)",
00920                                  correlation_num/correlation_denum, inputs_correlations(i,j),i,j);
00921 
00922                     inputs_grad(isample, i) += dfunc_dCorr * (
00923                                                  correlation_denum * dCROSSij_dqj[j]
00924                                                - correlation_num * dSTDi_dqi[i] * inputs_stds[j]
00925                                                  ) / squared_correlation_denum;
00926 
00927                     if( bprop_all_terms )
00928                         inputs_grad(isample, j) += dfunc_dCorr * (
00929                                                      correlation_denum * dCROSSij_dqj[i]
00930                                                    - correlation_num * dSTDi_dqi[j] * inputs_stds[i]
00931                                                      ) / squared_correlation_denum;
00932                 }
00933             }
00934             for (int i = 0; i < input_size; i++)
00935             {
00936                 average_deriv_tmp += fabs( inputs_grad(isample, i) );
00937                 inputs_grad(isample, i) *= norm_factor * (1.-momentum);
00938             }
00939         }
00940         average_deriv = average_deriv_tmp / (real)( input_size * n_samples );
00941         PLASSERT( average_deriv >= 0.);
00942     } // END cost_function == "correlation"
00943 
00944     else
00945         PLERROR("LayerCostModule::bpropAccUpdate() not implemented for cost function %s",
00946                  cost_function.c_str());
00947 }
00948 
00949 
00951 // Auxiliary Functions for Pascal's cost function //
00953 void LayerCostModule::computePascalStatistics(const Mat& inputs)
00954 {
00955      computePascalStatistics( inputs,
00956                               inputs_expectation, inputs_cross_quadratic_mean);
00957 }
00958 
00959 void LayerCostModule::computePascalStatistics(const Mat& inputs,
00960                                               Vec& expectation, Mat& cross_quadratic_mean) const
00961 {
00962     int n_samples = inputs.length();
00963     one_count = 1. / (real)n_samples;
00964     Vec input;
00965 
00966     expectation.resize( input_size );
00967     expectation.clear();
00968     cross_quadratic_mean.resize(input_size,input_size);
00969     cross_quadratic_mean.clear();
00970 
00971     inputs_expectation.clear();
00972     inputs_cross_quadratic_mean.clear();
00973 
00974     for (int isample = 0; isample < n_samples; isample++)
00975     {
00976         input = inputs(isample);
00977         for (int i = 0; i < input_size; i++)
00978         {
00979             expectation[i] += input[i];
00980             for (int j = 0; j < i; j++)
00981                  cross_quadratic_mean(i,j) += input[i] * input[j];
00982         }
00983     }
00984 
00985     for (int i = 0; i < input_size; i++)
00986     {
00987         expectation[i] *= one_count;
00988         for (int j = 0; j < i; j++)
00989              cross_quadratic_mean(i,j) *= one_count;
00990     }
00991     if( ( momentum > 0.0 ) && during_training )
00992     {
00993         for (int i = 0; i < input_size; i++)
00994         {
00995             expectation[i] = momentum*inputs_expectation_trainMemory[i]
00996                                          +(1.0-momentum)*expectation[i];
00997             inputs_expectation_trainMemory[i] = expectation[i];
00998             for (int j = 0; j < i; j++)
00999             {
01000                  cross_quadratic_mean(i,j) = momentum*inputs_cross_quadratic_mean_trainMemory(i,j)
01001                                                        +(1.0-momentum)*cross_quadratic_mean(i,j);
01002                  inputs_cross_quadratic_mean_trainMemory(i,j) = cross_quadratic_mean(i,j);
01003             }
01004         }
01005     }
01006 }
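// Added summary: over a batch of n_samples rows q(s), the function above computes
//   expectation[i]            = (1/n_samples) * sum_s q_i(s)
//   cross_quadratic_mean(i,j) = (1/n_samples) * sum_s q_i(s) * q_j(s)    (for j < i)
// and, when momentum > 0 during training, replaces each statistic by the moving
// average  momentum * memory + (1 - momentum) * batch_value.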
01007 
01008 real LayerCostModule::func_(real value) const
01009 {
01010     if( SQUARE_FUNC )
01011         return value * value;
01012     if( POW4_FUNC )
01013         return value * value * value * value;
01014     if( LOG_FUNC )
01015     {
01016         if( fast_is_equal( value, 1. ) || value > 1. )
01017             return REAL_MAX;
01018         return -safeflog( 1.-value );
01019     }
01020     if( EXP_FUNC )
01021         return exp(value);
01022     if( LINEAR_FUNC )
01023         return value;
01024     PLERROR("in LayerCostModule::func_() no boolean *_FUNC has been set.");
01025     return REAL_MAX;
01026 }
01027 real LayerCostModule::deriv_func_(real value) const
01028 {
01029     if( SQUARE_FUNC )
01030         return 2. * value;
01031     if( POW4_FUNC )
01032         return 4. * value * value * value;
01033     if( LOG_FUNC )
01034     {
01035         if( fast_is_equal( value, 1. ) )
01036            return REAL_MAX;
01037         return 1. / (1. - value);
01038     }
01039     if( EXP_FUNC )
01040         return exp(value);
01041     if( LINEAR_FUNC )
01042         return 1.;
01043     PLERROR("in LayerCostModule::deriv_func_() no boolean *_FUNC has been set.");
01044     return REAL_MAX;
01045 }
01046 
01047 
01048 void LayerCostModule::computeCorrelationStatistics(const Mat& inputs)
01049 {
01050     computeCorrelationStatistics(inputs,
01051                                  inputs_expectation, inputs_cross_quadratic_mean,
01052                                  inputs_stds, inputs_correlations);
01053 }
01054 
01059 void LayerCostModule::computeCorrelationStatistics(const Mat& inputs,
01060                                                    Vec& expectation, Mat& cross_quadratic_mean,
01061                                                    Vec& stds, Mat& correlations) const
01062 {
01063     int n_samples = inputs.length();
01064     one_count = 1. / (real)n_samples;
01065     Vec input;
01066 
01067     expectation.resize( input_size );
01068     expectation.clear();
01069     cross_quadratic_mean.resize(input_size,input_size);
01070     cross_quadratic_mean.clear();
01071     stds.resize( input_size );
01072     stds.clear();
01073     correlations.resize(input_size,input_size);
01074     correlations.fill(1.); // The default correlation is 1
01075 
01076     for (int isample = 0; isample < n_samples; isample++)
01077     {
01078         input = inputs(isample);
01079         for (int i = 0; i < input_size; i++)
01080         {
01081             expectation[i] += input[i];
01082             cross_quadratic_mean(i,i) += input[i] * input[i];
01083             for (int j = 0; j < i; j++)
01084                  cross_quadratic_mean(i,j) += input[i] * input[j];
01085         }
01086     }
01087 
01088     for (int i = 0; i < input_size; i++)
01089     {
01091         expectation[i] *= one_count;
01092         cross_quadratic_mean(i,i) *= one_count;
01093 
01094         if( fast_is_equal(momentum, 0.)
01095             ||  !during_training )
01096         {
01099             real tmp = cross_quadratic_mean(i,i) - expectation[i] * expectation[i];
01100             if( tmp > 0. )  //  std[i] = 0 by default
01101                 stds[i] = sqrt( tmp );
01102         }
01103 
01104         for (int j = 0; j < i; j++)
01105         {
01107             cross_quadratic_mean(i,j) *= one_count;
01108 
01109             if( fast_is_equal(momentum, 0.)
01110                 ||  !during_training )
01111             {
01113                 real tmp = stds[i] * stds[j];
01114                 if( !fast_is_equal(tmp, 0.) )  //  correlations(i,j) = 1 by default
01115                     correlations(i,j) = ( cross_quadratic_mean(i,j)
01116                                           - expectation[i]*expectation[j] ) / tmp;
01117             }
01118         }
01119     }
01120 
01121     if( !fast_is_equal(momentum, 0.) && during_training )
01122     {
01123         for (int i = 0; i < input_size; i++)
01124         {
01125             expectation[i] = momentum*inputs_expectation_trainMemory[i]
01126                              +(1.0-momentum)*expectation[i];
01127 
01128             inputs_expectation_trainMemory[i] = expectation[i];
01129 
01130             cross_quadratic_mean(i,i) = momentum*inputs_cross_quadratic_mean_trainMemory(i,i)
01131                                         +(1.0-momentum)*cross_quadratic_mean(i,i);
01132             inputs_cross_quadratic_mean_trainMemory(i,i) = cross_quadratic_mean(i,i);
01133 
01134             real tmp = cross_quadratic_mean(i,i) - expectation[i] * expectation[i];
01135             if( tmp > 0. )  //  std[i] = 0 by default
01136                 stds[i] = sqrt( tmp );
01137 
01138             for (int j = 0; j < i; j++)
01139             {
01140                 cross_quadratic_mean(i,j) = momentum*inputs_cross_quadratic_mean_trainMemory(i,j)
01141                     +(1.0-momentum)*cross_quadratic_mean(i,j);
01142                 inputs_cross_quadratic_mean_trainMemory(i,j) = cross_quadratic_mean(i,j);
01143 
01144                 tmp = stds[i] * stds[j];
01145                  if( !fast_is_equal(tmp, 0.) )  //  correlations(i,j) = 1 by default
01146                      correlations(i,j) = ( cross_quadratic_mean(i,j)
01147                                          - expectation[i]*expectation[j] ) / tmp;
01148 
01149             }
01150         }
01151     }
01152 }
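// Added summary: in addition to the means and cross products, the function above computes
//   stds[i]           = sqrt( E[q_i^2] - E[q_i]^2 )                              (0 if the variance is <= 0)
//   correlations(i,j) = ( E[q_i q_j] - E[q_i]*E[q_j] ) / ( stds[i] * stds[j] )   (1 by default)
// where the expectations are batch means, possibly blended with the train memory
// through the momentum, as for the Pascal statistics.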
01153 
01155 // Auxiliary Functions //
01157 real LayerCostModule::computeKLdiv(const Mat& histo) const
01158 {
01159     PLASSERT( histo.length() == input_size );
01160     PLASSERT( histo.width() == histo_size );
01161     real cost = 0.;
01162     for (int i = 0; i < input_size; i++)
01163         for (int j = 0; j < i; j++)
01164         {
01165             // These variables are used in case one bin of
01166             // the histogram is empty for one unit
01167             // and not for another one ( (Nj-Ni).log(Ni/Nj) = nan ).
01168             // In such a case, we 'defer' ("differ") the count to the next bin, and so on.
01169             real differ_count_i = 0.;
01170             real differ_count_j = 0.;
01171             int n_differ = 0;
01172 //                    real last_positive_Ni_k, last_positive_Nj_k;
01173 //                    int last_n_differ;
01174             for (int k = 0; k < histo_size; k++)
01175             {
01176                 real Ni_k = histo( i, k ) + differ_count_i;
01177                 real Nj_k = histo( j, k ) + differ_count_j;
01178                 if( fast_exact_is_equal(Ni_k, 0.0) )
01179                 {
01180                     differ_count_j = Nj_k;
01181                     n_differ += 1;
01182                 }
01183                 else if( fast_exact_is_equal(Nj_k, 0.0) )
01184                 {
01185                     differ_count_i = Ni_k;
01186                     n_differ += 1;
01187                 }
01188                 else
01189                 {
01190                     cost += KLdivTerm( Ni_k, Nj_k ) *(real)(1+n_differ) *HISTO_STEP;
01191                     differ_count_i = 0.0;
01192                     differ_count_j = 0.0;
01193                     n_differ = 0;
01194 //                            last_positive_Ni_k = Ni_k;
01195 //                            last_positive_Nj_k = Nj_k;
01196 //                            last_n_differ = n_differ;
01197                 }
01198             }
01199 //                    if( differ_count_i > 0.0 )
01200 //                    {
01201 //                        "cas ou on regroupe avec le dernier";
01202 //                        cost -= KLdivTerm(last_positive_Ni_k,last_positive_Nj_k)
01203 //                                  *(real)(1+last_n_differ) *HISTO_STEP;
01204 //                        cost += KLdivTerm(last_positive_Ni_k+differ_count_i,last_positive_Nj_k)
01205 //                                 *(real)(1+last_n_differ+n_differ) *HISTO_STEP;
01206 //                    }
01207 //
01208 //                    else if ( differ_count_j > 0.0 )
01209 //                    {
01210 //                        "cas ou on regroupe avec le dernier";
01211 //                        cost -= KLdivTerm(last_positive_Ni_k,last_positive_Nj_k)
01212 //                                 *(real)(1+last_n_differ) *HISTO_STEP;
01213 //                        cost += KLdivTerm(last_positive_Ni_k,last_positive_Nj_k+differ_count_j)
01214 //                                 *(real)(1+last_n_differ+n_differ) *HISTO_STEP;
01215 //                    }
01216         }
01217     // Normalization w.r.t. number of units
01218     return cost *norm_factor;
01219 }
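// Added worked example of the deferred-count mechanism above (illustrative numbers):
// with histo_size = 4 and, for a pair (i,j),
//   histo(i,.) = (0.50, 0.00, 0.25, 0.25)    histo(j,.) = (0.25, 0.25, 0.25, 0.25)
// bin k=1 has Ni_1 = 0, so Nj_1 = 0.25 is carried over to bin k=2, which then
// contributes KLdivTerm(0.25, 0.50) with weight (1 + n_differ) = 2 (times HISTO_STEP);
// this avoids the undefined term a log over an empty bin would produce.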
01220 
01221 real LayerCostModule::computeKLdiv(bool store_in_cache)
01222 {
01223     if( store_in_cache )
01224     {
01225             real cost = 0.;
01226             for (int i = 0; i < input_size; i++)
01227                 for (int j = 0; j < i; j++)
01228                 {
01229                     // These variables are used in case one bin of
01230                     // the histogram is empty for one unit
01231                     // and not for another one ( (Nj-Ni).log(Ni/Nj) = nan ).
01232                     // In such a case, we 'defer' ("differ") the count to the next bin, and so on.
01233                     cache_differ_count_i[ i ][ j ].clear();
01234                     cache_differ_count_j[ i ][ j ].clear();
01235                     cache_n_differ[i][j].fill( 0. );
01236 //                    real last_positive_Ni_k, last_positive_Nj_k;
01237 //                    real last_n_differ;
01238                     for (int k = 0; k < histo_size; k++)
01239                     {
01240                         real Ni_k = inputs_histo(i,k) + cache_differ_count_i[i][j][ k ];
01241                         real Nj_k = inputs_histo(j,k) + cache_differ_count_j[i][j][ k ];
01242 
01243                         if( fast_exact_is_equal(Ni_k, 0.0) )
01244                         {
01245                             if( k < histo_size - 1 ) // "case where we group with the last one";
01246                             {
01247                                 cache_differ_count_j[i][j][ k+1 ] = Nj_k;
01248                                 cache_n_differ[i][j][ k+1 ] = cache_n_differ[i][j][ k ] + 1;
01249                             }
01250                         }
01251                         else if( fast_exact_is_equal(Nj_k, 0.0) )
01252                         {
01253                             if( k < histo_size - 1 ) // "case where we group with the last one";
01254                             {
01255                                 cache_differ_count_i[i][j][ k+1 ] = Ni_k;
01256                                 cache_n_differ[i][j][ k+1 ] = cache_n_differ[i][j][ k ] + 1;
01257                             }
01258                         }
01259                         else
01260                         {
01261                             cost += KLdivTerm( Ni_k, Nj_k ) *(real)(1 + cache_n_differ[i][j][ k ]) *HISTO_STEP;
01262 //                            last_positive_Ni_k = Ni_k;
01263 //                            last_positive_Nj_k = Nj_k;
01264 //                            last_n_differ = cache_n_differ[i][j][ k ];
01265                         }
01266 //                    if( cache_differ_count_i[i][j][ histo_size - 1 ] > 0.0 )
01267 //                        "cas ou on regroupe avec le dernier";
01268 //                    else if ( cache_differ_count_j[i][j][ histo_size - 1 ] > 0.0 )
01269 //                        "cas ou on regroupe avec le dernier";
01270                     }
01271                 }
01272             // Normalization w.r.t. number of units
01273             return cost *norm_factor;
01274     }
01275     else
01276         return computeKLdiv(inputs_histo);
01277 }
01278 
01279 
01280 real LayerCostModule::delta_KLdivTerm(int i, int j, int index_i, real over_dq)
01281 {
01282     PLASSERT( index_i < histo_size - 1 );
01283     // already tested in the code of bpropUpdate()
01284     PLASSERT( over_dq > 0. );
01285     PLASSERT( inputs_histo( i, index_i ) > 0. );
01286     // Verifies that:
01287     // ( inputs_histo is up to date
01288     //   => ) the input(isample,i) has been counted
01289 
01290     real grad_update = 0.0;
01291 
01292     real Ni_ki, Nj_ki, Ni_ki_shift1, Nj_ki_shift1;
01293     real n_differ_before_ki, n_differ_before_ki_shift1;
01294 
01295     if( i > j ) // Because the cache matrices are symmetric but not completely filled
01296     {
01297         Ni_ki        = inputs_histo( i, index_i     ) + cache_differ_count_i[ i ][ j ][ index_i ];
01298         Nj_ki        = inputs_histo( j, index_i     ) + cache_differ_count_j[ i ][ j ][ index_i ];
01299         Ni_ki_shift1 = inputs_histo( i, index_i + 1 ) + cache_differ_count_i[ i ][ j ][ index_i + 1 ];
01300         Nj_ki_shift1 = inputs_histo( j, index_i + 1 ) + cache_differ_count_j[ i ][ j ][ index_i + 1 ];
01301         n_differ_before_ki = cache_n_differ[ i ][ j ][ index_i ];
01302         n_differ_before_ki_shift1 = cache_n_differ[ i ][ j ][ index_i + 1 ];
01303     }
01304     else // ( i < j ) // Be very careful with indices here!
01305     {
01306         Ni_ki        = inputs_histo( i, index_i     ) + cache_differ_count_j[ j ][ i ][ index_i ];
01307         Nj_ki        = inputs_histo( j, index_i     ) + cache_differ_count_i[ j ][ i ][ index_i ];
01308         Ni_ki_shift1 = inputs_histo( i, index_i + 1 ) + cache_differ_count_j[ j ][ i ][ index_i + 1 ];
01309         Nj_ki_shift1 = inputs_histo( j, index_i + 1 ) + cache_differ_count_i[ j ][ i ][ index_i + 1 ];
01310         n_differ_before_ki = cache_n_differ[ j ][ i ][ index_i ];
01311         n_differ_before_ki_shift1 = cache_n_differ[ j ][ i ][ index_i + 1 ];
01312     }
01313     real additional_differ_count_j_after = 0.;
01314     real n_differ_after_ki = n_differ_before_ki;
01315     real n_differ_after_ki_shift1 = n_differ_before_ki_shift1;
01316 
01317     // What follows is only valid when the qi's are increased (dq>0).
01318 
01319     if( !fast_exact_is_equal(Nj_ki, 0.0) )
01320     // if it is zero, then INCREASING qi will not change anything
01321     // (it was already counted in the next histogram's bin)
01322     {
01323         // removing the term of the sum that will be modified
01324         grad_update -= KLdivTerm( Ni_ki,
01325                                   Nj_ki )
01326                        * ( 1 + n_differ_before_ki);
01327 
01328         if( fast_exact_is_equal(Ni_ki, one_count) )
01329         {
01330             additional_differ_count_j_after = Nj_ki;
01331             n_differ_after_ki_shift1 = n_differ_after_ki + 1;
01332                                   // = n_differ_before_ki + 1;
01333         }
01334         else
01335         {
01336             // adding the term of the sum with its modified value
01337             grad_update += KLdivTerm( Ni_ki - one_count,
01338                                       Nj_ki )
01339                            * ( 1 + n_differ_after_ki );
01340         }
01341 
01342         if( !fast_exact_is_equal(Nj_ki_shift1,0.0) )
01343         {
01344             // adding the term of the sum with its modified value
01345             grad_update += KLdivTerm( Ni_ki_shift1 + one_count,
01346                                           Nj_ki_shift1 + additional_differ_count_j_after )
01347                                * ( 1 + n_differ_after_ki_shift1 );
01348 
01349             if( !fast_exact_is_equal(Ni_ki_shift1, 0.0) ) // "cas ou on regroupe avec le dernier";
01350             {
01351                 // removing the term of the sum that will be modified
01352                 grad_update -= KLdivTerm( Ni_ki_shift1,
01353                                           Nj_ki_shift1 )
01354                                * ( 1 + n_differ_before_ki_shift1 );
01355             }
01356             else // ( Ni_ki_shift1 == 0.0 )
01357             {
01358                 // We search   ki' > k(i)+1   such that   n(i,ki') > 0
01359                 real additional_differ_count_j_before = 0.;
01360                 real additional_n_differ_before_ki_shift1 = 0.;
01361                 int ki;
01362                 for (ki = index_i+2; ki < histo_size; ki++)
01363                 {
01364                     additional_differ_count_j_before += inputs_histo( j, ki );
01365                     additional_n_differ_before_ki_shift1 += 1;
01366                     if( inputs_histo( i, ki )>0 )
01367                         break;
01368                 }
01369                 if( ki < histo_size )
01370                 {
01371                     grad_update -= KLdivTerm( inputs_histo( i, ki ),
01372                                               Nj_ki_shift1 + additional_differ_count_j_before )
01373                                    * ( 1 + n_differ_before_ki_shift1 + additional_n_differ_before_ki_shift1 );
01374 
01375                     if( additional_differ_count_j_before > 0. )
01376                     // We have to carry over the additional count for unit j
01377                     {
01378                         grad_update += KLdivTerm( inputs_histo( i, ki ),
01379                                                   additional_differ_count_j_before )
01380                                        * ( additional_n_differ_before_ki_shift1 );
01381                     }
01382                 }
01383             }
01384         }
01385         else // ( Nj_ki_shift1 == 0.0 )
01386         {
01387             real additional_differ_count_i_before = 0.;
01388             // We search kj > ki+1 such that inputs_histo( j, kj ) > 0.
01389             int kj;
01390             for( kj = index_i+2; kj < histo_size; kj++)
01391             {
01392                 additional_differ_count_i_before += inputs_histo( i, kj );
01393                 n_differ_before_ki_shift1 += 1;
01394                 if( inputs_histo( j, kj ) > 0. )
01395                     break;
01396             }
01397             if ( !fast_exact_is_equal(additional_differ_count_j_after, 0. ) )
01398                 n_differ_after_ki_shift1 = n_differ_before_ki_shift1;
01399             if( kj < histo_size )
01400             {
01401                 if ( fast_exact_is_equal(n_differ_after_ki_shift1, n_differ_before_ki_shift1) )
01402                 {
01403                     // ( no qj was differed after we changed the count at bin ki )
01404                     // OR ( some qj were differed to bin ki+1 AND that bin was not empty )
01405                     grad_update += KLdivTerm( Ni_ki_shift1 + additional_differ_count_i_before + one_count,
01406                                               inputs_histo( j, kj ) + additional_differ_count_j_after )
01407                                    * ( 1 + n_differ_after_ki_shift1 );
01408                 }
01409                 else
01410                 {
01411                     PLASSERT( n_differ_before_ki_shift1 > n_differ_after_ki_shift1 );
01412                     grad_update += KLdivTerm( Ni_ki_shift1 + one_count,
01413                                               additional_differ_count_j_after )
01414                                    * ( 1 + n_differ_after_ki_shift1 );
01415                     grad_update += KLdivTerm( additional_differ_count_i_before,
01416                                               inputs_histo( j, kj ) )
01417                                    * ( n_differ_before_ki_shift1 - n_differ_after_ki_shift1 );
01418                 }
01419 
01420                 if( !fast_exact_is_equal(Ni_ki_shift1 + additional_differ_count_i_before,0.0) )
01421                 {
01422                     grad_update -= KLdivTerm( Ni_ki_shift1 + additional_differ_count_i_before,
01423                                               inputs_histo( j, kj ) )
01424                                    * ( 1 + n_differ_before_ki_shift1 );
01425                 }
01426                 else // ( Ni_ki_shift1' == 0 == Nj_ki_shift1 ) && ( no q[i] before q[j']... )
01427                 {
01428                     // We search ki' > kj+1 such that inputs_histo( i, ki' ) > 0.
01429                     real additional_differ_count_j_before = 0.;
01430                     real additional_n_differ_before_ki_shift1 = 0.;
01431                     int kj2;
01432                     for( kj2 = kj+1; kj2 < histo_size; kj2++)
01433                     {
01434                         additional_differ_count_j_before += inputs_histo( j, kj2 );
01435                         additional_n_differ_before_ki_shift1 += 1;
01436                         if( inputs_histo( i, kj2 ) > 0. )
01437                             break;
01438                     }
01439                     if ( fast_exact_is_equal(additional_differ_count_j_before, 0. ) )
01440                         n_differ_after_ki_shift1 = n_differ_before_ki_shift1;
01441                     if( kj2 < histo_size )
01442                     {
01443                         grad_update -= KLdivTerm( inputs_histo( i, kj2 ),
01444                                                   Nj_ki_shift1 + additional_differ_count_j_before )
01445                                        * ( 1 + n_differ_before_ki_shift1 + additional_n_differ_before_ki_shift1 );
01446 
01447                         if( additional_differ_count_j_before > 0. )
01448                         {
01449                             grad_update += KLdivTerm( inputs_histo( i, kj2 ),
01450                                                       additional_differ_count_j_before )
01451                                            * ( additional_n_differ_before_ki_shift1 );
01452                         }
01453                     }
01454                 }
01455             }
01456         }
01457     }
01458     return grad_update *HISTO_STEP *over_dq *norm_factor;
01459 }
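// Note on delta_KLdivTerm: the bookkeeping above estimates, by finite
// differences, how the pairwise histogram cost changes when one sample of
// unit i is moved from bin index_i to bin index_i+1 (one_count is the weight
// of a single sample in the histogram). Empty bins are not compared
// directly: their contributions are "differed", i.e. merged into the next
// non-empty bin, which is what the additional_differ_count_* and n_differ_*
// corrections account for. The result is rescaled by
// HISTO_STEP * over_dq * norm_factor before being returned.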
01460 
01461 real LayerCostModule::delta_SafeKLdivTerm(int i, int j, int index_i, real over_dq)
01462 {
01463     //PLASSERT( over_dq > 0.0 )
01464     PLASSERT( index_i < histo_size - 1 );
01465 
01466     real grad_update = 0.0;
01467 
01468     real Ni_ki = inputs_histo(i,index_i);
01469     PLASSERT( !fast_exact_is_equal(Ni_ki, 0.0) ); // Verification:
01470                                                   // if inputs_histo is up to date,
01471                                                   // the input(isample,i) has been counted
01472     real Ni_ki_shift1 = inputs_histo(i,index_i+1);
01473 
01474     real Nj_ki        = inputs_histo(j,index_i);
01475     real Nj_ki_shift1 = inputs_histo(j,index_i+1);
01476 
01477 
01478     // removing the term of the sum that will be modified
01479     grad_update -= KLdivTerm( Ni_ki, Nj_ki );
01480 
01481     // adding the term of the sum with its modified value
01482     grad_update += KLdivTerm( Ni_ki-one_count, Nj_ki );
01483 
01484     grad_update += KLdivTerm( Ni_ki_shift1+one_count, Nj_ki_shift1 );
01485 
01486     grad_update -= KLdivTerm( Ni_ki_shift1, Nj_ki_shift1 );
01487 
01488     return grad_update *over_dq;
01489 }
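// In this "safe" variant every bin is non-empty by construction (see
// computeSafeHisto below), so no merging of empty bins is needed: moving one
// sample of unit i from bin index_i to bin index_i+1 only affects two terms
// of the sum, which are removed at their old counts and re-added with counts
// shifted by one_count. Multiplying by over_dq turns this difference, in
// effect, into a finite-difference estimate of the derivative of the cost.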
01490 
01491 
01492 real LayerCostModule::KLdivTerm(real pi, real pj) const
01493 {
01494     return ( pj - pi ) * safeflog( pi/pj );
01495 }
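// KLdivTerm(pi,pj) = (pj - pi) * log(pi/pj) is symmetric in its two
// arguments and equals minus the per-bin contribution to the symmetrized
// Kullback-Leibler divergence
//     KL(p||q) + KL(q||p) = sum_k (p_k - q_k) * log(p_k / q_k).
// For instance, KLdivTerm(0.2, 0.1) = (0.1 - 0.2) * log(2) ~= -0.069.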
01496 
01497 
01498 void LayerCostModule::computeHisto(const Mat& inputs)
01499 {
01500     computeHisto(inputs,
01501                  inputs_histo);
01502 }
01503 void LayerCostModule::computeHisto(const Mat& inputs,
01504                                    Mat& histo) const
01505 {
01506     int n_samples = inputs.length();
01507     one_count = 1. / (real)n_samples;
01508 
01509     histo.resize(input_size,histo_size);
01510     histo.clear();
01511     for (int isample = 0; isample < n_samples; isample++)
01512     {
01513         Vec input = inputs(isample);
01514         for (int i = 0; i < input_size; i++)
01515         {
01516             PLASSERT( histo_index(input[i]) < histo_size);
01517             histo( i, histo_index(input[i]) ) += one_count;
01518         }
01519     }
01520 }
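// Each row i of histo is the normalized histogram of the outputs of unit i
// over the batch: every sample adds one_count = 1/n_samples to exactly one of
// the histo_size bins, so each row sums to 1. A minimal usage sketch, where
// layer_cost (a LayerCostModule) and batch_outputs (an n_samples x input_size
// Mat of values in [0,1]) are hypothetical names:
//
//     Mat histo;
//     layer_cost.computeHisto(batch_outputs, histo);
//     // histo(i,k) = fraction of samples whose unit-i output falls in bin k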
01521 
01522 
01523 void LayerCostModule::computeSafeHisto(const Mat& inputs)
01524 {
01525     computeSafeHisto(inputs,
01526                      inputs_histo);
01527 }
01528 void LayerCostModule::computeSafeHisto(const Mat& inputs,
01529                                        Mat& histo) const
01530 {
01531     int n_samples = inputs.length();
01532     one_count = 1. / (real)(n_samples+histo_size);
01533 
01534     histo.resize(input_size,histo_size);
01535     histo.fill(one_count);
01536     for (int isample = 0; isample < n_samples; isample++)
01537     {
01538         Vec input = inputs(isample);
01539         for (int i = 0; i < input_size; i++)
01540             histo(i, histo_index(input[i])) += one_count;
01541     }
01542 }
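// computeSafeHisto is the add-one (Laplace-style) smoothed version: every bin
// starts at one_count = 1/(n_samples + histo_size) before the samples are
// counted, so no bin is ever zero and the "safe" KL terms never take the log
// of 0. Each row still sums to 1, since
//     histo_size * one_count + n_samples * one_count = 1.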
01543 
01544 
01545 // Returns the index of the (1D) histogram bin
01546 // corresponding to the real input value q in [0,1]
01547 //
01548 int LayerCostModule::histo_index(real q) const
01549 {
01550     PLASSERT( (q >= 0.) && (q <= 1.) );
01551 
01552     if( q >= 1. )
01553        return histo_size - 1;
01554 
01555     PLASSERT( (int)floor(q*(real)histo_size) < histo_size );
01556 
01557 // LINEAR SCALE
01558     return (int)floor(q*(real)histo_size);
01559 }
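// Example with the default histo_size = 10: q = 0.37 falls in bin
// floor(0.37 * 10) = 3, q = 0.05 in bin 0, and q = 1.0 is clamped into the
// last bin (index 9) by the test above.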
01560 
01561 // Returns the minimum amount dq which has to be added/removed to q
01562 // so that q+dq will be counted in the next/previous bin of the histogram
01563 //   (cf. LayerCostModule::histo_index)
01564 //
01565 // Note: we do not care about cases where histo_index(q)=histo_size
01566 //      (this is done in the bpropAccUpdate code)
01567 //
01568 real LayerCostModule::dq(real q) const
01569 {
01570     // ** Simple version **
01571     return HISTO_STEP;
01572 
01573     // ** Elaborated version **
01574     //if( fast_exact_is_equal( round(q*(real)histo_size) , ceil(q*(real)histo_size) ) )
01575     //   return HISTO_STEP;
01576     //else
01577     //   return -HISTO_STEP;
01578 
01579     // ** BAD VERSION: too unstable **
01580     // return (real)histo_index(q+1.0/(real)histo_size)/(real)histo_size - q;
01581 }
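// With the simple version above, dq(q) is always +HISTO_STEP, i.e. one bin
// width on the linear scale used by histo_index: a sample is always treated
// as moving up to the next bin when the gradients of the histogram-based
// costs are estimated by finite differences (cf. delta_KLdivTerm and
// delta_SafeKLdivTerm above).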
01582 
01584 // costNames //
01586 TVec<string> LayerCostModule::costNames()
01587 {
01588     return TVec<string>(1, name);
01589 }
01590 
01592 // addPortName //
01594 void LayerCostModule::addPortName(const string& name)
01595 {
01596     PLASSERT( portname_to_index.find(name) == portname_to_index.end() );
01597     portname_to_index[name] = ports.length();
01598     ports.append(name);
01599 }
01600 
01602 // getPorts //
01604 const TVec<string>& LayerCostModule::getPorts()
01605 {
01606     return ports;
01607 }
01608 
01610 // getPortSizes //
01612 const TMat<int>& LayerCostModule::getPortSizes()
01613 {
01614     return port_sizes;
01615 }
01616 
01618 // getPortIndex //
01620 int LayerCostModule::getPortIndex(const string& port)
01621 {
01622     map<string, int>::const_iterator it = portname_to_index.find(port);
01623     if (it == portname_to_index.end())
01624         return -1;
01625     else
01626         return it->second;
01627 }
01628 
01629 
01630 } // end of namespace PLearn
01631 
01632 
01633 /*
01634   Local Variables:
01635   mode:c++
01636   c-basic-offset:4
01637   c-file-style:"stroustrup"
01638   c-file-offsets:((innamespace . 0)(inline-open . 0))
01639   indent-tabs-mode:nil
01640   fill-column:79
01641   End:
01642 */
01643 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :