// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: MatrixOneHotSquaredLoss.cc 4306 2005-10-23 02:42:13Z tihocan $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "MatrixOneHotSquaredLoss.h"

namespace PLearn {
using namespace std;


PLEARN_IMPLEMENT_OBJECT(MatrixOneHotSquaredLoss,
                        "ONE LINE DESCR",
                        "NO HELP");

MatrixOneHotSquaredLoss::MatrixOneHotSquaredLoss()
    : coldval_(0.0), hotval_(0.0)
{ }

MatrixOneHotSquaredLoss::MatrixOneHotSquaredLoss(Variable* input1, Variable* input2, real coldval, real hotval)
    : inherited(input1, input2, input2->length(), input2->width()),
      coldval_(coldval), hotval_(hotval)
{
    build_();
}

void
MatrixOneHotSquaredLoss::build()
{
    inherited::build();
    build_();
}

void
MatrixOneHotSquaredLoss::build_()
{
    if (input2 && !input2->isVec())
        PLERROR("In MatrixOneHotSquaredLoss: classnum must be a vector variable representing the indices of netouts (typically some classnums)");
}

void
MatrixOneHotSquaredLoss::declareOptions(OptionList &ol)
{
    declareOption(ol, "coldval_", &MatrixOneHotSquaredLoss::coldval_, OptionBase::buildoption, "");
    declareOption(ol, "hotval_", &MatrixOneHotSquaredLoss::hotval_, OptionBase::buildoption, "");
    inherited::declareOptions(ol);
}

void MatrixOneHotSquaredLoss::recomputeSize(int& l, int& w) const
{
    if (input2) {
        l = input2->length();
        w = input2->width();
    } else
        l = w = 0;
}

// For each column k of input1 (the network output matrix), compute the squared
// Euclidean distance between that column and the one-hot target vector for
// class input2[k] (hotval_ at the class index, coldval_ everywhere else).
void MatrixOneHotSquaredLoss::fprop()
{
    int n = input1->length();
    for (int k=0; k<length(); k++)
    {
        int classnum = (int) input2->valuedata[k];
        real res = 0.;
        for (int i=0; i<n; i++)
            res += square(input1->matValue[i][k] - (i==classnum ? hotval_ : coldval_));
        valuedata[k] = res;
    }
}


// Accumulate 2 * gradientdata[k] * (input1 - one-hot target) into input1's
// gradient, column by column, with a specialised path when the incoming
// gradient is exactly 1.
void MatrixOneHotSquaredLoss::bprop()
{
    int n = input1->length();
    for (int k=0; k<length(); k++)
    {
        real gr = gradientdata[k];
        int classnum = (int) input2->valuedata[k];
        if (!fast_exact_is_equal(gr, 1.))
        {
            gr = gr + gr;
            for (int i=0; i<n; i++)
                input1->matGradient[i][k] += gr*(input1->matValue[i][k] - (i==classnum ? hotval_ : coldval_));
        }
        else // specialised version for gr==1
        {
            for (int i=0; i<n; i++)
                input1->matGradient[i][k] += two(input1->matValue[i][k] - (i==classnum ? hotval_ : coldval_));
        }
    }
}


void MatrixOneHotSquaredLoss::symbolicBprop()
{
    PLERROR("MatrixOneHotSquaredLoss::symbolicBprop not implemented.");
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
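
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the library source). The variable
// names below and the way the Var graph is assembled are assumptions, shown
// only to make the semantics of this class concrete:
//
//     Var netout(4, 10);     // 4 class scores per example, one example per column
//     Var classnums(10, 1);  // target class index for each of the 10 columns
//     Var cost = new MatrixOneHotSquaredLoss(netout, classnums,
//                                            0.0 /* coldval */, 1.0 /* hotval */);
//
// After fprop(), cost holds, for each column k,
//     sum_i ( netout(i,k) - (i == classnums[k] ? hotval : coldval) )^2,
// i.e. the squared distance between column k of netout and the one-hot
// encoding of its target class; bprop() then propagates
// 2 * (netout - target) times the incoming gradient back into netout's
// gradient, as implemented above.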