SoftmaxNLLCostModule.cc
// -*- C++ -*-

// SoftmaxNLLCostModule.cc
//
// Copyright (C) 2008 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#include "SoftmaxNLLCostModule.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    SoftmaxNLLCostModule,
    "Computes the cost of a SoftmaxModule followed by an NLLCostModule, "
    "with better numerical precision.",
    "If target is the index of the true class, this module computes\n"
    "    cost = -log( softmax(input)[target] ),\n"
    "and back-propagates the corresponding gradient.\n");

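// Why this is more precise than chaining a SoftmaxModule and an NLLCostModule:
// computing softmax(input)[target] explicitly can underflow to 0 when the
// target score is far below the largest score, so taking its log then blows
// up. Working directly in the log domain avoids this:
//     log_softmax(x)[t] = x[t] - ( max(x) + log( sum_j exp(x[j] - max(x)) ) ),
// the usual max-shifted log-sum-exp, which is assumed to be what the
// log_softmax() call below computes.
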
SoftmaxNLLCostModule::SoftmaxNLLCostModule()
{
    output_size = 1;
    target_size = 1;
}

void SoftmaxNLLCostModule::declareOptions(OptionList& ol)
{
    // declareOption(ol, "myoption", &SoftmaxNLLCostModule::myoption,
    //               OptionBase::buildoption,
    //               "Help text describing this option");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void SoftmaxNLLCostModule::build_()
{
}

// ### Nothing to add here, simply calls build_
void SoftmaxNLLCostModule::build()
{
    inherited::build();
    build_();
}


void SoftmaxNLLCostModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}


///////////
// fprop //
///////////
void SoftmaxNLLCostModule::fprop(const Vec& input, const Vec& target, Vec& cost) const
{
    PLASSERT( input.size() == input_size );
    PLASSERT( target.size() == target_size );

    tmp_vec.resize(input_size);
    cost.resize(output_size);

    if (input.hasMissing())
        cost[0] = MISSING_VALUE;
    else
    {
        int the_target = (int) round( target[0] );
        log_softmax(input, tmp_vec);
        cost[0] = - tmp_vec[the_target];
    }
}

void SoftmaxNLLCostModule::fprop(const Mat& inputs, const Mat& targets, Mat& costs)
    const
{
    PLASSERT( inputs.width() == input_size );
    PLASSERT( targets.width() == target_size );

    int batch_size = inputs.length();
    PLASSERT( targets.length() == batch_size );

    tmp_vec.resize(input_size);
    costs.resize(batch_size, output_size);

    for( int k=0; k<batch_size; k++ )
    {
        if (inputs(k).hasMissing())
            costs(k, 0) = MISSING_VALUE;
        else
        {
            int target_k = (int) round( targets(k, 0) );
            log_softmax(inputs(k), tmp_vec);
            costs(k, 0) = - tmp_vec[target_k];
        }
    }
}

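// Port-based fprop. As the checks below show, a port whose matrix is provided
// and non-empty is treated as an input (here 'prediction' and 'target'), a
// port whose matrix is provided but empty is an output to be resized and
// filled (here 'cost'), and a null pointer means the caller does not need
// that port at all.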
void SoftmaxNLLCostModule::fprop(const TVec<Mat*>& ports_value)
{
    PLASSERT( ports_value.length() == nPorts() );

    Mat* prediction = ports_value[0];
    Mat* target = ports_value[1];
    Mat* cost = ports_value[2];

    // If we have prediction and target, and we want cost
    if( prediction && !prediction->isEmpty()
        && target && !target->isEmpty()
        && cost && cost->isEmpty() )
    {
        PLASSERT( prediction->width() == port_sizes(0, 1) );
        PLASSERT( target->width() == port_sizes(1, 1) );

        int batch_size = prediction->length();
        PLASSERT( target->length() == batch_size );

        cost->resize(batch_size, port_sizes(2, 1));
        // Make sure tmp_vec has the right size before log_softmax fills it.
        tmp_vec.resize( prediction->width() );

        for( int i=0; i<batch_size; i++ )
        {
            if( (*prediction)(i).hasMissing() || is_missing((*target)(i,0)) )
                (*cost)(i,0) = MISSING_VALUE;
            else
            {
                int target_i = (int) round( (*target)(i,0) );
                PLASSERT( is_equal( (*target)(i, 0), target_i ) );
                log_softmax( (*prediction)(i), tmp_vec );
                (*cost)(i,0) = - tmp_vec[target_i];
            }
        }
    }
    else if( !prediction && !target && !cost )
        return;
    else
        PLCHECK_MSG( false, "Unknown port configuration" );

    checkProp(ports_value);
}

/////////////////
// bpropUpdate //
/////////////////
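// Derivation of the gradient used below: with s = softmax(x) and t the index
// of the true class,
//     cost = -log( s[t] ) = log( sum_j exp(x[j]) ) - x[t],
// so
//     d cost / d x[i] = exp(x[i]) / sum_j exp(x[j]) - (i == t ? 1 : 0)
//                     = s[i] - (i == t ? 1 : 0).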
void SoftmaxNLLCostModule::bpropUpdate(
        const Vec& input, const Vec& target, real cost,
        Vec& input_gradient, bool accumulate)
{
    PLASSERT( input.size() == input_size );
    PLASSERT( target.size() == target_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == input_size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( input_size );
        input_gradient.clear();
    }

    int the_target = (int) round( target[0] );

    // input_gradient[ i ] = softmax(x)[i] if i != t,
    // input_gradient[ t ] = softmax(x)[t] - 1.
    if( accumulate )
    {
        // When accumulating, add the gradient to the existing contents of
        // input_gradient instead of overwriting them.
        tmp_vec.resize( input_size );
        softmax(input, tmp_vec);
        for( int i=0; i<input_size; i++ )
            input_gradient[i] += tmp_vec[i];
    }
    else
        softmax(input, input_gradient);
    input_gradient[ the_target ] -= 1.;
}

void SoftmaxNLLCostModule::bpropUpdate(
        const Mat& inputs, const Mat& targets, const Vec& costs,
        Mat& input_gradients, bool accumulate)
{
    PLASSERT( inputs.width() == input_size );
    PLASSERT( targets.width() == target_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == input_size &&
                input_gradients.length() == inputs.length(),
                "Cannot resize input_gradients and accumulate into it" );
    }
    else
    {
        input_gradients.resize(inputs.length(), input_size );
        input_gradients.clear();
    }

    // input_gradients(i, j)   = softmax(x_i)[j]        if j != t_i,
    // input_gradients(i, t_i) = softmax(x_i)[t_i] - 1.
    tmp_vec.resize( input_size );
    for (int i = 0; i < inputs.length(); i++) {
        int the_target = (int) round( targets(i, 0) );
        if (accumulate) {
            // Add the gradient to the existing row instead of overwriting it.
            softmax(inputs(i), tmp_vec);
            for (int j = 0; j < input_size; j++)
                input_gradients(i, j) += tmp_vec[j];
        }
        else
            softmax(inputs(i), input_gradients(i));
        input_gradients(i, the_target) -= 1.;
    }
}

void SoftmaxNLLCostModule::bpropAccUpdate(const TVec<Mat*>& ports_value,
                                          const TVec<Mat*>& ports_gradient)
{
    PLASSERT( ports_value.length() == nPorts() );
    PLASSERT( ports_gradient.length() == nPorts() );

    Mat* prediction = ports_value[0];
    Mat* target = ports_value[1];
#ifdef BOUNDCHECK
    Mat* cost = ports_value[2];
#endif
    Mat* prediction_grad = ports_gradient[0];
    Mat* target_grad = ports_gradient[1];
    Mat* cost_grad = ports_gradient[2];

    // If we have cost_grad and we want prediction_grad
    if( prediction_grad && prediction_grad->isEmpty()
        && cost_grad && !cost_grad->isEmpty() )
    {
        PLASSERT( prediction );
        PLASSERT( target );
        PLASSERT( cost );
        PLASSERT( !target_grad );

        PLASSERT( prediction->width() == port_sizes(0,1) );
        PLASSERT( target->width() == port_sizes(1,1) );
        PLASSERT( cost->width() == port_sizes(2,1) );
        PLASSERT( prediction_grad->width() == port_sizes(0,1) );
        PLASSERT( cost_grad->width() == port_sizes(2,1) );
        PLASSERT( cost_grad->width() == 1 );

        int batch_size = prediction->length();
        PLASSERT( target->length() == batch_size );
        PLASSERT( cost->length() == batch_size );
        PLASSERT( cost_grad->length() == batch_size );

        prediction_grad->resize(batch_size, port_sizes(0,1));

        for( int k=0; k<batch_size; k++ )
        {
            // input_gradient[ i ] = softmax(x)[i] if i != t,
            // input_gradient[ t ] = softmax(x)[t] - 1.
            int target_k = (int) round((*target)(k, 0));
            softmax((*prediction)(k), (*prediction_grad)(k));
            (*prediction_grad)(k, target_k) -= 1.;
        }
    }
    else if( !prediction_grad && !target_grad && !cost_grad )
        return;
    else if( !cost_grad && prediction_grad && prediction_grad->isEmpty() )
        PLERROR("In SoftmaxNLLCostModule::bpropAccUpdate - cost gradient is NULL,\n"
                "cannot compute prediction gradient. Maybe you should set\n"
                "\"propagate_gradient = 0\" on the incoming connection.\n");
    else
        PLERROR("In SoftmaxNLLCostModule::bpropAccUpdate - Port configuration "
                "not implemented for class '%s'", classname().c_str());

    checkProp(ports_value);
    checkProp(ports_gradient);
}

void SoftmaxNLLCostModule::bbpropUpdate(const Vec& input, const Vec& target,
                                 real cost,
                                 Vec& input_gradient, Vec& input_diag_hessian,
                                 bool accumulate)
{
    // Second-order back-propagation (diagonal of the Hessian) is not
    // implemented for this module.
    PLCHECK(false);
}

TVec<string> SoftmaxNLLCostModule::costNames()
{
    if (name == "" || name == classname())
        return TVec<string>(1, "NLL");
    else
        return TVec<string>(1, name + ".NLL");
}

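// Minimal usage sketch (illustrative only, not part of the original module):
// it builds the module for a hypothetical 3-class problem, computes the cost
// for one sample, and back-propagates the gradient. Sizes and values are made
// up for the example.
static void softmaxNLLCostModuleExampleSketch()
{
    SoftmaxNLLCostModule cost_module;
    cost_module.input_size = 3;            // three classes (assumed setup)
    cost_module.build();

    Vec input(3);                          // raw scores (logits)
    input[0] = 1.0; input[1] = 2.0; input[2] = 0.5;
    Vec target(1);
    target[0] = 1;                         // index of the true class
    Vec cost;
    cost_module.fprop(input, target, cost);
    // cost[0] == -log_softmax(input)[1]

    Vec input_gradient;
    cost_module.bpropUpdate(input, target, cost[0], input_gradient, false);
    // input_gradient[i] == softmax(input)[i] - (i == 1 ? 1 : 0)
}
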
} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :