SoftmaxModule.cc

// -*- C++ -*-

// SoftmaxModule.cc
//
// Copyright (C) 2006 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#include "SoftmaxModule.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    SoftmaxModule,
    "Computes the softmax function on a vector.",
    ""
);

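// Overview (explanatory note added to this listing, not part of the original
// source): for an input vector x of size input_size, the module computes
//     output[i] = exp(x[i]) / sum_j exp(x[j]),
// so the output has the same size as the input and sums to 1.
//
// A minimal usage sketch, assuming the usual OnlineLearningModule workflow
// (size options are set, build() is called, then fprop()):
//
//     SoftmaxModule mod;
//     mod.input_size = 3;        // public build option inherited from
//                                // OnlineLearningModule (assumption)
//     mod.build();               // build_() sets output_size = input_size
//     Vec in(3), out;
//     in[0] = 1; in[1] = 2; in[2] = 3;
//     mod.fprop(in, out);        // 'out' is positive and sums to 1
//
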
///////////////////
// SoftmaxModule //
///////////////////
SoftmaxModule::SoftmaxModule()
{}

////////////////////
// declareOptions //
////////////////////
void SoftmaxModule::declareOptions(OptionList& ol)
{
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    // Hide unused options.

    redeclareOption(ol, "output_size", &SoftmaxModule::output_size,
                    OptionBase::nosave,
                    "Set at build time.");
}

////////////
// build_ //
////////////
void SoftmaxModule::build_()
{
    // The softmax preserves the size of its input.
    output_size = input_size;
}

///////////
// build //
///////////
void SoftmaxModule::build()
{
    inherited::build();
    build_();
}

//////////////////////////////////
// makeDeepCopyFromShallowCopy //
//////////////////////////////////
void SoftmaxModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

///////////
// fprop //
///////////
void SoftmaxModule::fprop(const Vec& input, Vec& output) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    softmax( input, output );
}
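
// Worked example (explanatory note, not in the original source): for input
// (1, 2, 3), exp gives roughly (2.718, 7.389, 20.086), whose sum is about
// 30.193, so fprop fills 'output' with approximately (0.090, 0.245, 0.665).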

void SoftmaxModule::fprop(const Mat& inputs, Mat& outputs)
{
    PLASSERT( inputs.width() == input_size );
    int n = inputs.length();
    outputs.resize(n, output_size);
    // Apply the softmax row by row, one row per sample in the minibatch.
    for (int i = 0; i < n; i++)
        softmax(inputs(i), outputs(i));
}

/////////////////
// bpropUpdate //
/////////////////
void SoftmaxModule::bpropUpdate(const Vec& input, const Vec& output,
                                Vec& input_gradient,
                                const Vec& output_gradient,
                                bool accumulate)
{
    PLASSERT( input.size() == input_size );
    PLASSERT( output.size() == output_size );
    PLASSERT( output_gradient.size() == output_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == input_size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( input_size );
        input_gradient.clear();
    }

    // input_gradient[i] = output_gradient[i] * output[i]
    //                   - (output_gradient . output) * output[i]
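    // This is the softmax Jacobian worked out (explanatory note, not in the
    // original source): since output[i] = exp(input[i]) / sum_k exp(input[k]),
    //     d output[i] / d input[j] = output[i] * (delta_ij - output[j]),
    // and the chain rule gives
    //     input_gradient[j] = sum_i output_gradient[i] * output[i] * (delta_ij - output[j])
    //                       = output[j] * (output_gradient[j] - dot(output_gradient, output)).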
    real outg_dot_out = dot( output_gradient, output );
    for( int i=0 ; i<input_size ; i++ )
    {
        real in_grad_i = (output_gradient[i] - outg_dot_out) * output[i];
        input_gradient[i] += in_grad_i;
    }
}

void SoftmaxModule::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                Mat& input_gradients,
                                const Mat& output_gradients,
                                bool accumulate)
{
    PLASSERT( inputs.width() == input_size );
    PLASSERT( outputs.width() == output_size );
    PLASSERT( output_gradients.width() == output_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == input_size &&
                      input_gradients.length() == inputs.length(),
                      "Cannot resize input_gradients and accumulate into it" );
    }
    else
    {
        input_gradients.resize(inputs.length(), input_size);
        input_gradients.fill(0);
    }

    for (int j = 0; j < inputs.length(); j++) {
        // input_gradient[i] = output_gradient[i] * output[i]
        //                   - (output_gradient . output) * output[i]
        real outg_dot_out = dot(output_gradients(j), outputs(j));
        for( int i=0 ; i<input_size ; i++ )
            input_gradients(j, i) +=
                (output_gradients(j, i) - outg_dot_out) * outputs(j, i);
    }
}
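
// Finite-difference sanity check (illustrative sketch, not part of the
// original source): with a fixed vector 'c' standing in for dL/d(output),
// the analytic gradient computed above can be compared to a numerical one.
//
//     SoftmaxModule mod;
//     mod.input_size = 3;
//     mod.build();
//     Vec in(3), out, g, c(3);
//     // ... fill 'in' and 'c' with arbitrary values ...
//     mod.fprop(in, out);
//     mod.bpropUpdate(in, out, g, c, false);
//     // For each j, g[j] should be close to
//     //   ( dot(c, softmax(in + eps*e_j)) - dot(c, softmax(in - eps*e_j)) ) / (2*eps)
//     // for a small eps and the j-th unit vector e_j (pseudo-notation).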

////////////
// forget //
////////////
void SoftmaxModule::forget()
{
    // This module has no learned parameters or internal state to reset.
}

/////////////////////
// setLearningRate //
/////////////////////
void SoftmaxModule::setLearningRate(real dynamic_learning_rate)
{
    // This module has no parameters to update, so the learning rate is unused.
}

//////////////////
// bbpropUpdate //
//////////////////
void SoftmaxModule::bbpropUpdate(const Vec& input, const Vec& output,
                                 Vec& input_gradient,
                                 const Vec& output_gradient,
                                 Vec& input_diag_hessian,
                                 const Vec& output_diag_hessian,
                                 bool accumulate)
{
    PLERROR( "Not implemented yet, please come back later or complain to"
             " lamblinp." );
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :