// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: LogAddVariable.cc 8852 2008-04-21 20:54:19Z tihocan $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "ExpVariable.h"
#include "LogAddVariable.h"
#include <plearn/math/pl_math.h>
#include <plearn/math/TMat_maths.h>
#include "Var_operators.h"
//#include "Var_utils.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    LogAddVariable,
    "Stable computation of log(exp(input1) + exp(input2)).",
    "This Variable may be used:\n"
    " - with two inputs of the same size, to compute an element-wise\n"
    "   logadd over both input matrices, or\n"
    " - with one matrix input (input1) and one scalar input (input2)\n"
    "   to compute a vector logadd (i.e. log(exp(a1) + ... + exp(an)))\n"
    "   over the first 'n' rows (or columns, depending on the option\n"
    "   'vector_logadd'), where 'n' is an integer value provided by\n"
    "   the input2 Variable. If input2 is not provided, then the\n"
    "   logadd is performed over all rows/columns of input1.\n"
    "Note that in order to use the second mechanism, one must change the\n"
    "value of the 'vector_logadd' option, to remove any ambiguity, e.g.\n"
    "in the case of two scalar inputs."
);
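// Usage sketch (illustrative only: the names and sizes below are arbitrary,
// and the three-argument calls assume the header's default for the trailing
// 'call_build_' flag):
//
//     Var a(2, 3), b(2, 3);
//     Var c = new LogAddVariable(a, b, "none");      // element-wise logadd
//
//     Var m(4, 5);
//     Var n(1, 1);                                   // scalar holding 'n'
//     n->value[0] = 5;                               // logadd over all 5 columns
//     Var r = new LogAddVariable(m, n, "per_row");   // one logadd per row of m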
////////////////////
// LogAddVariable //
////////////////////
LogAddVariable::LogAddVariable():
    vector_logadd("none"),
    vector_logadd_id(0)
{}

LogAddVariable::LogAddVariable(Variable* input1, Variable* input2,
                               const string& vl,
                               bool call_build_):
    inherited(input1, input2,
              vl == "none" || vl == "per_row"    ? input1->length()
                                                 : 1,
              vl == "none" || vl == "per_column" ? input1->width()
                                                 : 1,
              call_build_),
    vector_logadd(vl),
    vector_logadd_id(0)
{
    if (call_build_)
        build_();
}

////////////////////
// declareOptions //
////////////////////
void LogAddVariable::declareOptions(OptionList& ol)
{
    declareOption(ol, "vector_logadd", &LogAddVariable::vector_logadd,
                  OptionBase::buildoption,
        "Must be one of:\n"
        " - 'none'      : element-wise logadd over the two input matrices\n"
        " - 'per_column': vector logadd on each column of input1, using\n"
        "                 the first 'n' rows as given by input2\n"
        " - 'per_row'   : vector logadd on each row of input1, using the\n"
        "                 first 'n' columns as given by input2.");

    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void LogAddVariable::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void LogAddVariable::build_()
{
    // Transform the string 'vector_logadd' into an integer for faster
    // computations.
    if (vector_logadd == "none")
        vector_logadd_id = 0;
    else if (vector_logadd == "per_row")
        vector_logadd_id = 1;
    else if (vector_logadd == "per_column")
        vector_logadd_id = -1;
    else
        PLERROR("In LogAddVariable::build_ - Invalid value for "
                "'vector_logadd': %s", vector_logadd.c_str());

    if (!vector_logadd_id && input1 && input2) {
        if (input1->length() != input2->length() ||
            input1->width()  != input2->width())
            PLERROR("In LogAddVariable::build_ - input1 and input2 must "
                    "have the same size");
    }

    // Need to rebuild since correct sizes depend on 'vector_logadd_id'.
    inherited::build();
}

//////////////////////////////////
// makeDeepCopyFromShallowCopy //
//////////////////////////////////
void LogAddVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(work, copies);
    deepCopyField(work_ptr, copies);
}

///////////////////
// recomputeSize //
///////////////////
void LogAddVariable::recomputeSize(int& l, int& w) const
{
    if (input1) {
        l = vector_logadd_id >= 0 ? input1->length()
                                  : 1;
        w = vector_logadd_id <= 0 ? input1->width()
                                  : 1;
    } else
        l = w = 0;
}

///////////
// fprop //
///////////
void LogAddVariable::fprop()
{
    if (!vector_logadd_id) {
        // Ugly hack to make it compile with ICC.
#ifdef __INTEL_COMPILER
        PLearn::apply(input1->value, input2->value, value, logadd_for_icc);
#else
        PLearn::apply(input1->value, input2->value, value, logadd);
#endif
    } else if (vector_logadd_id > 0) {
        int n = input2 ? int(round(input2->value[0]))
                       : width();
        for (int i = 0; i < length(); i++) {
            work_ptr = input1->matValue(i);
            if (input2)
                work_ptr = work_ptr.subVec(0, n);
            value[i] = logadd(work_ptr);
        }
    } else {
        int n = input2 ? int(round(input2->value[0]))
                       : length();
        work.resize(n);
        for (int i = 0; i < width(); i++) {
            if (input2)
                work << input1->matValue.subMat(0, i, n, 1);
            else
                work << input1->matValue.column(i);
            value[i] = logadd(work);
        }
    }
}
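// Gradient note: writing s = logadd(a_1, ..., a_n) = log(sum_k exp(a_k)),
// the partial derivative is
//     ds/da_k = exp(a_k) / sum_j exp(a_j) = exp(a_k - s),
// i.e. the softmax weight of a_k. This is why bprop below computes
// safeexp(input - value) and multiplies the result by the incoming gradient.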
///////////
// bprop //
///////////
void LogAddVariable::bprop()
{
    if (!vector_logadd_id) {
        // TODO Note that these computations are not efficient at all.
        Vec grad1(nelems());
        grad1 = input1->value - value;
        apply(grad1, grad1, safeexp);
        input1->gradient += grad1 % gradient;

        Vec grad2(nelems());
        grad2 = input2->value - value;
        apply(grad2, grad2, safeexp);
        input2->gradient += grad2 % gradient;
    } else if (vector_logadd_id > 0) {
        int n = input2 ? int(round(input2->value[0]))
                       : width();
        work.resize(n);
        for (int i = 0; i < length(); i++) {
            work << input1->matValue.subMat(i, 0, 1, n);
            work -= value[i];
            apply(work, work, safeexp);
            multiplyAcc(input1->matGradient.subMat(i, 0, 1, n).toVec(),
                        work, gradient[i]);
        }
    } else {
        int n = input2 ? int(round(input2->value[0]))
                       : length();
        work.resize(n);
        for (int i = 0; i < width(); i++) {
            work << input1->matValue.subMat(0, i, n, 1);
            work -= value[i];
            apply(work, work, safeexp);
            work *= gradient[i];
            input1->matGradient.subMat(0, i, n, 1) += work.toMat(n, 1);
        }
    }
}

///////////////////
// symbolicBprop //
///////////////////
void LogAddVariable::symbolicBprop()
{
    if (!vector_logadd_id) {
        input1->accg(g * (exp(input1) / (exp(input1) + exp(input2))));
        input2->accg(g * (exp(input2) / (exp(input1) + exp(input2))));
    } else {
        PLERROR("In LogAddVariable::symbolicBprop - Not implemented");
    }
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :