// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion
// Copyright (C) 2003 Olivier Delalleau

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: NegCrossEntropySigmoidVariable.cc 8376 2008-01-10 15:08:15Z saintmlx $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "NegCrossEntropySigmoidVariable.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(NegCrossEntropySigmoidVariable,
                        "Computes the sigmoid of its first input, and then the negative "
                        "cross-entropy cost",
                        "Let o be the first input and t the second input; this computes\n"
                        "result = - \\sum_i [ t_i*log(o_i) + (1-t_i)*log(1-o_i) ]");

///////////////////////////////////////
// NegCrossEntropySigmoidVariable //
///////////////////////////////////////
NegCrossEntropySigmoidVariable::NegCrossEntropySigmoidVariable(Variable* netout, Variable* target, real regularizer_, bool ignore_missing_)
    : inherited(netout,target,1,1), regularizer(regularizer_), ignore_missing(ignore_missing_)
{
    build_();
}

////////////////////
// declareOptions //
////////////////////
void NegCrossEntropySigmoidVariable::declareOptions(OptionList& ol)
{
    declareOption(ol, "regularizer", &NegCrossEntropySigmoidVariable::regularizer, OptionBase::buildoption,
                  "If > 0, will modify the cost function to: \n"
                  "(1-t)*(r*log(o)+(1-r)*log(1-o)) + t*(r*log(1-o)+(1-r)*log(o)) \n"
                  "(t = target, o = output, r = regularizer = a small value)\n");

    declareOption(ol, "ignore_missing", &NegCrossEntropySigmoidVariable::ignore_missing, OptionBase::buildoption,
                  "Indication that missing targets should be ignored");

    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void NegCrossEntropySigmoidVariable::build()
{
    inherited::build();
    build_();
}

void NegCrossEntropySigmoidVariable::build_()
{
    if (input1 && input2) {
        // input1 and input2 are (respectively) netout and target from constructor
        if (input1->size() != input2->size())
            PLERROR("In NegCrossEntropySigmoidVariable: netout and target must have the same size");
    }
}

///////////////////
// recomputeSize //
///////////////////
void NegCrossEntropySigmoidVariable::recomputeSize(int& l, int& w) const
{ l=1, w=1; }

///////////
// fprop //
///////////
void NegCrossEntropySigmoidVariable::fprop()
{
    real cost = 0.0;
    for (int i=0; i<input1->size(); i++)
    {
        real output = sigmoid(input1->valuedata[i]);
        real target = input2->valuedata[i];
        if (!ignore_missing || !is_missing(target))
        {
            if (fast_exact_is_equal(output, 0.0)) {
                if (fast_exact_is_equal(target, 1.0)) {
                    PLWARNING("NegCrossEntropySigmoidVariable::fprop: model output is 0 and target is 1, cost should be infinite !");
                    cost += -1e9;
                } // If target == 0.0 do nothing, cost is 0.
            } else if (fast_exact_is_equal(output, 1.0)) {
                if (fast_exact_is_equal(target, 0.0)) {
                    PLWARNING("NegCrossEntropySigmoidVariable::fprop: model output is 1 and target is 0, cost should be infinite !");
                    cost += -1e9;
                } // If target == 1.0 do nothing, cost is 0.
            } else {
                if (fast_exact_is_equal(regularizer, 0)) {
                    // Standard cross entropy.
                    cost += target*pl_log(output) + (1.0-target)*pl_log(1.0-output);
                } else {
                    // Regularized cross entropy.
                    cost += target*((1 - regularizer) * pl_log(output) + regularizer * pl_log(1.0 - output)) +
                        (1.0-target)*((1 - regularizer) * pl_log(1.0-output) + regularizer * pl_log(output));
                }
            }
        }
    }
    valuedata[0] = -cost;
}

///////////
// bprop //
///////////
void NegCrossEntropySigmoidVariable::bprop()
{
    real gr = *gradientdata;
    for (int i=0; i<input1->size(); i++)
    {
        real output = sigmoid(input1->valuedata[i]);
        real target = input2->valuedata[i];
        if (!ignore_missing || !is_missing(target))
        {
            if (fast_exact_is_equal(regularizer, 0)) {
                // Standard cross entropy.
                input1->gradientdata[i] += gr*(output - target);
            } else {
                // Regularized cross entropy.
                if (fast_exact_is_equal(target, 0.0)) {
                    input1->gradientdata[i] += gr*((1-regularizer) * output - regularizer * (1-output));
                } else if (fast_exact_is_equal(target, 1.0)) {
                    input1->gradientdata[i] += gr*(regularizer * output - (1-regularizer) * (1-output));
                } else {
                    PLERROR("NegCrossEntropySigmoidVariable::bprop: target is neither 0 nor 1");
                }
            }
        }
    }
}

////////////////////
// setRegularizer //
////////////////////
void NegCrossEntropySigmoidVariable::setRegularizer(real r)
{
    PLWARNING("NegCrossEntropySigmoidVariable::setRegularizer() has been deprecated, use the setOption() method instead");
    this->regularizer = r;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
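For reference, the quantities computed in the standard (unregularized) branches of fprop() and bprop() above can be reproduced with a few lines of plain C++. The sketch below is not part of the PLearn file and does not use its Var/Variable API; it simply re-evaluates cost = - sum_i [ t_i*log(o_i) + (1-t_i)*log(1-o_i) ] with o_i = sigmoid(netout_i), plus the per-component gradient o_i - t_i, on made-up toy values.

#include <cmath>
#include <cstdio>

int main()
{
    // Made-up pre-sigmoid network outputs and binary targets, for illustration only.
    const double netout[] = { 2.0, -1.0, 0.5 };
    const double target[] = { 1.0,  0.0, 1.0 };
    const int n = 3;

    double cost = 0.0;
    for (int i = 0; i < n; ++i) {
        double o = 1.0 / (1.0 + std::exp(-netout[i]));            // o_i = sigmoid(netout_i)
        double t = target[i];
        cost += t * std::log(o) + (1.0 - t) * std::log(1.0 - o);  // accumulate log-likelihood
        // Gradient of the final (negated) cost w.r.t. netout_i, as in bprop()'s standard branch:
        std::printf("d(cost)/d(netout[%d]) = %g\n", i, o - t);
    }
    std::printf("negative cross-entropy = %g\n", -cost);          // corresponds to valuedata[0]
    return 0;
}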