PLearn 0.1
// -*- C++ -*-

// DoubleProductVariable.cc
//
// Copyright (C) 2007 Simon Lemieux, Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Simon Lemieux, Pascal Vincent

#include "DoubleProductVariable.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    DoubleProductVariable,
    "Computes output(n,i+j*nw) = sum_k X(n,k)*W(i,k)*M(j,k) from inputs X, W and M.",
    "Let X, W and M be the inputs, nw the length of W, and d their width"
    " (they all have the same width)."
    "\nThen output(n,i+j*nw) = sum_k{X(n,k)*W(i,k)*M(j,k)}");

DoubleProductVariable::DoubleProductVariable(Var& x, Var& w, Var& m)
    : inherited(x & w & m, x.length(), w.length()*m.length())
{
    build_();
}


void DoubleProductVariable::recomputeSize(int& l, int& w) const
{
    if (varray.size() > 0) {
        l = varray[0].length();                     // the computed length of this Var
        w = varray[1].length()*varray[2].length();  // the computed width
    } else
        l = w = 0;
}

// ### computes value from varray values
void DoubleProductVariable::fprop()
{
    Mat x = varray[0]->matValue,
        w = varray[1]->matValue,
        m = varray[2]->matValue;

    int nx = x.length(),
        nw = w.length(),
        nm = m.length(),
        d  = x.width();  // ( = w.width() = m.width() )

    /* Naive reference implementation (kept for clarity):
    for(int n=0; n<nx; n++)
        for(int i=0; i<nw; i++)
            for(int j=0; j<nm; j++)
            {
                matValue(n,i+j*nw) = 0.;
                for(int k=0; k<d; k++)
                    matValue(n,i+j*nw) += x(n,k)*w(i,k)*m(j,k);
            }
    */

    for(int n=0; n<nx; n++)
    {
        real* matValue_n = matValue[n];
        const real* x_n = x[n];
        for(int j=0; j<nm; j++)
        {
            const real* m_j = m[j];
            for(int i=0; i<nw; i++)
            {
                const real* w_i = w[i];
                real val = 0;
                for(int k=0; k<d; k++)
                    val += x_n[k]*w_i[k]*m_j[k];
                matValue_n[i+j*nw] = val;
            }
        }
    }
}

// ### computes varray gradients from gradient
void DoubleProductVariable::bprop()
{
    Mat x = varray[0]->matValue,
        w = varray[1]->matValue,
        m = varray[2]->matValue,
        x_grad = varray[0]->matGradient,
        w_grad = varray[1]->matGradient,
        m_grad = varray[2]->matGradient,
        s_grad = matGradient;

    int nx = x.length(),
        nw = w.length(),
        nm = m.length(),
        d  = x.width();  // ( = w.width() = m.width() )

    /* Naive reference implementation of the gradient updates (kept for clarity):
    for(int n=0; n<nx; n++)
        for(int i=0; i<nw; i++)
            for(int j=0; j<nm; j++)
            {
                for(int k=0; k<d; k++)
                {
                    x_grad(n,k) += s_grad(n,i+j*nw)*w(i,k)*m(j,k);
                    w_grad(i,k) += s_grad(n,i+j*nw)*x(n,k)*m(j,k);
                    m_grad(j,k) += s_grad(n,i+j*nw)*x(n,k)*w(i,k);
                }
            }
    */

    for(int n=0; n<nx; n++)
    {
        const real* s_grad_n = s_grad[n];
        const real* x_n = x[n];
        real* x_grad_n = x_grad[n];
        for(int j=0; j<nm; j++)
        {
            const real* m_j = m[j];
            real* m_grad_j = m_grad[j];
            for(int i=0; i<nw; i++)
            {
                const real* w_i = w[i];
                real* w_grad_i = w_grad[i];
                real s_grad_n_val = s_grad_n[i+j*nw];
                for(int k=0; k<d; k++)
                {
                    x_grad_n[k] += s_grad_n_val*w_i[k]*m_j[k];
                    w_grad_i[k] += s_grad_n_val*x_n[k]*m_j[k];
                    m_grad_j[k] += s_grad_n_val*x_n[k]*w_i[k];
                }
            }
        }
    }
}

// ### You can implement these methods:
// void DoubleProductVariable::bbprop() {}
// void DoubleProductVariable::symbolicBprop() {}
// void DoubleProductVariable::rfprop() {}


// ### Nothing to add here, simply calls build_
void DoubleProductVariable::build()
{
    inherited::build();
    build_();
}

void DoubleProductVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### If you want to deepCopy a Var field:
    // varDeepCopyField(somevariable, copies);
}

void DoubleProductVariable::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    // ### ex:
    // declareOption(ol, "myoption", &DoubleProductVariable::myoption,
    //               OptionBase::buildoption,
    //               "Help text describing this option");
    // ...

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void DoubleProductVariable::build_()
{
    // ### This method should do the real building of the object,
    // ### according to the set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of
    // ###    all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning"
    // ###    options have been modified.
    // ### You should assume that the parent class' build_() has already been
    // ### called.

    if (varW().width() != varX().width() || varW().width() != varM().width())
        PLERROR("All input matrix widths must be equal in DoubleProductVariable");
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
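
The column indexing used above (output column i+j*nw for row i of W and row j of M) is easy to get backwards. The following standalone sketch illustrates the same double product on tiny matrices; it deliberately avoids PLearn's Var/Mat types and uses plain std::vector storage, and the sizes nx=1, nw=2, nm=2, d=2 are arbitrary illustrative values, not anything prescribed by the class.

// Standalone illustration of the DoubleProductVariable fprop formula:
//   output(n, i + j*nw) = sum_k X(n,k) * W(i,k) * M(j,k)
// Plain std::vector matrices are used here instead of PLearn's Mat/Var types.
#include <cstdio>
#include <vector>

using Matrix = std::vector<std::vector<double>>;

Matrix doubleProduct(const Matrix& X, const Matrix& W, const Matrix& M)
{
    const int nx = X.size(), nw = W.size(), nm = M.size();
    const int d  = X[0].size();               // common width of X, W and M
    Matrix out(nx, std::vector<double>(nw * nm, 0.0));
    for (int n = 0; n < nx; ++n)
        for (int j = 0; j < nm; ++j)
            for (int i = 0; i < nw; ++i)
                for (int k = 0; k < d; ++k)
                    out[n][i + j * nw] += X[n][k] * W[i][k] * M[j][k];
    return out;
}

int main()
{
    // Illustrative sizes: nx=1, nw=2, nm=2, d=2.
    Matrix X = {{1.0, 2.0}};
    Matrix W = {{1.0, 0.0}, {0.0, 1.0}};
    Matrix M = {{1.0, 1.0}, {2.0, 3.0}};
    Matrix out = doubleProduct(X, W, M);
    // Expected row: 1 2 2 6, e.g. out(0, 0+1*nw) = 1*1*2 + 2*0*3 = 2.
    for (double v : out[0])
        std::printf("%g ", v);
    std::printf("\n");
    return 0;
}

The output row has width nw*nm = 4, matching the w.length()*m.length() width passed to the inherited constructor in DoubleProductVariable.cc.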