// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: NllGeneralGaussianVariable.cc 8169 2007-10-10 22:27:34Z larocheh $
 * This file is part of the PLearn library.
 ******************************************************* */

#include <plearn/var/NllGeneralGaussianVariable.h>
#include <plearn/var/Var_operators.h>
#include <plearn/math/plapack.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    NllGeneralGaussianVariable,
    "Computes the NLL under a Gaussian distribution centered around a data point.",
    "This variable computes the negative log-likelihood under a Gaussian distribution\n"
    "centered near a data point. The likelihood is computed for some given neighbors\n"
    "of the data point. A set of bases defining the principal components of the\n"
    "covariance matrix, the difference mu between the data point and the center of\n"
    "the Gaussian, and the noise variance in all directions of the space must be\n"
    "specified. The gradient is propagated to all of these parameters. Optionally, the\n"
    "gradient for mu can be computed based on the likelihood of fewer nearest neighbors.\n"
    "It is assumed that this Gaussian is part of a mixture model with L components.\n"
    );
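// Notation used in the comments below (as implied by fprop() and bprop()):
// let x be the input data point, x_j its j-th provided neighbor, mu the learned
// offset of the Gaussian center, F the (ncomponents x n) matrix whose rows are
// the basis vectors, and sn the isotropic noise variance. The covariance model is
//
//     Sigma = F' F + sn * I          (an n x n matrix, F' = transpose of F)
//
// and, for each neighbor j, this variable outputs
//
//     value[j] = NLL_j = -log N(x_j; x + mu, Sigma)
//
// (when use_mu is false, the center is simply x).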
NllGeneralGaussianVariable::NllGeneralGaussianVariable(
    const VarArray& the_varray, real thelogL, bool the_use_mu, int the_mu_nneighbors)
    : inherited(the_varray, the_varray[4]->length(), 1),
      n(varray[3]->size()),
      ncomponents(varray[0]->length() / varray[3]->size()),
      nneighbors(varray[4]->length()),
      log_L(thelogL),
      use_mu(the_use_mu),
      mu_nneighbors(the_mu_nneighbors)
{
    build_();
}


void
NllGeneralGaussianVariable::build()
{
    inherited::build();
    build_();
}

void
NllGeneralGaussianVariable::build_()
{
    // The VarArray contains the following variables:
    //    - varray[0] = the tangent plane (ncomponents x n sized vector)
    //    - varray[1] = mu(data_point) (n x 1)
    //    - varray[2] = sigma_noise (1 x 1)
    //    - varray[3] = input data point around which the Gaussian is centered
    //    - varray[4] = nearest neighbors (nneighbors x n)

    if(varray.length() != 5)
        PLERROR("In NllGeneralGaussianVariable::build_(): varray is of "
                "length %d but should be of length %d", varray.length(), 5);

    if(varray[1]->length() != n || varray[1]->width() != 1)
        PLERROR("In NllGeneralGaussianVariable::build_(): varray[1] "
                "is of size (%d,%d), but should be of size (%d,%d)",
                varray[1]->length(), varray[1]->width(),
                n, 1);

    if(varray[2]->length() != 1 || varray[2]->width() != 1)
        PLERROR("In NllGeneralGaussianVariable::build_(): varray[2] "
                "is of size (%d,%d), but should be of size (%d,%d)",
                varray[2]->length(), varray[2]->width(),
                1, 1);

    if(varray[3]->length() != n || varray[3]->width() != 1)
        PLERROR("In NllGeneralGaussianVariable::build_(): varray[3] "
                "is of size (%d,%d), but should be of size (%d,%d)",
                varray[3]->length(), varray[3]->width(),
                n, 1);

    if(varray[4]->width() != n)
        PLERROR("In NllGeneralGaussianVariable::build_(): varray[4] "
                "is of size (%d,%d), but should be of size (%d,%d)",
                varray[4]->length(), varray[4]->width(),
                nneighbors, n);

    if(mu_nneighbors < 0) mu_nneighbors = nneighbors;
    if(mu_nneighbors > nneighbors)
        PLERROR("In NllGeneralGaussianVariable::build_(): mu_nneighbors "
                "cannot be > than number of provided neighbors");

    F = varray[0]->value.toMat(ncomponents,n);
    if(use_mu) mu = varray[1]->value;
    sn = varray[2]->value;
    input = varray[3]->value;
    neighbors = varray[4]->matValue;

    diff_neighbor_input.resize(n);
    z.resize(nneighbors,n);
    U.resize(ncomponents,n);
    Ut.resize(n,n);
    V.resize(ncomponents,ncomponents);
    inv_Sigma_F.resize(ncomponents,n);
    inv_Sigma_z.resize(nneighbors,n);
    temp_ncomp.resize(ncomponents);
}


void NllGeneralGaussianVariable::recomputeSize(int& len, int& wid) const
{
    len = varray[4]->length();
    wid = 1;
}
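// How fprop() evaluates the NLL efficiently: since Sigma = F'F + sn*I, the
// eigenvectors of Sigma are the right singular vectors u_k of F, with
// eigenvalues sm_k + sn (sm_k = squared singular value of F); every direction
// orthogonal to the u_k has eigenvalue sn. Hence, for any vector z,
//
//     Sigma^-1 z   = z/sn + sum_k (1/(sm_k+sn) - 1/sn) (u_k . z) u_k
//     tr(Sigma^-1) = n/sn + sum_k (1/(sm_k+sn) - 1/sn)
//
// which only requires the SVD of the (ncomponents x n) matrix F instead of an
// explicit n x n inversion. The same identity is applied to the rows of F to
// fill inv_Sigma_F, which is reused in bprop().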
void NllGeneralGaussianVariable::fprop()
{
    F_copy.resize(F.length(),F.width());
    sm_svd.resize(ncomponents);
    // N.B. this is the SVD of F'
    F_copy << F;
    lapackSVD(F_copy, Ut, S, V,'A',1.5);
    for (int k=0;k<ncomponents;k++)
    {
        sm_svd[k] = mypow(S[k],2);
        U(k) << Ut(k);
    }

    real mahal = 0;
    real norm_term = 0;
    real dotp = 0;
    real coef = 0;
    inv_Sigma_z.clear();
    tr_inv_Sigma = 0;
    for(int j=0; j<nneighbors; j++)
    {
        zj = z(j);
        if(use_mu)
        {
            substract(neighbors(j),input,diff_neighbor_input);
            substract(diff_neighbor_input,mu,zj);
        }
        else
        {
            substract(neighbors(j),input,zj);
        }

        mahal = -0.5*pownorm(zj)/sn[0];
        norm_term = - n/2.0 * Log2Pi - 0.5*(n-ncomponents)*pl_log(sn[0]);

        inv_sigma_zj = inv_Sigma_z(j);
        inv_sigma_zj << zj;
        inv_sigma_zj /= sn[0];

        if(j==0)
            tr_inv_Sigma = n/sn[0];

        for(int k=0; k<ncomponents; k++)
        {
            uk = U(k);
            dotp = dot(zj,uk);
            coef = (1.0/(sm_svd[k]+sn[0]) - 1.0/sn[0]);
            multiplyAcc(inv_sigma_zj,uk,dotp*coef);
            mahal -= square(dotp)*0.5*coef;
            norm_term -= 0.5*pl_log(sm_svd[k]);
            if(j==0)
                tr_inv_Sigma += coef;
        }

        value[j] = -1*(norm_term + mahal);
    }

    inv_Sigma_F.clear();
    for(int k=0; k<ncomponents; k++)
    {
        fk = F(k);
        inv_sigma_fk = inv_Sigma_F(k);
        inv_sigma_fk << fk;
        inv_sigma_fk /= sn[0];
        for(int k2=0; k2<ncomponents; k2++)
        {
            uk2 = U(k2);
            multiplyAcc(inv_sigma_fk,uk2,
                        (1.0/(sm_svd[k2]+sn[0]) - 1.0/sn[0])*dot(fk,uk2));
        }
    }
}

// grad_F += alpha * (M - v1 v2')
void NllGeneralGaussianVariable::bprop_to_bases(const Mat& R, const Mat& M,
                                                const Vec& v1,
                                                const Vec& v2, real alpha)
{
#ifdef BOUNDCHECK
    if (M.length() != R.length() || M.width() != R.width()
        || v1.length()!=M.length() || M.width()!=v2.length() )
        PLERROR("NllGeneralGaussianVariable::bprop_to_bases(): incompatible "
                "arguments' sizes");
#endif

    const real* v_1=v1.data();
    const real* v_2=v2.data();
    for (int i=0;i<M.length();i++)
    {
        real* mi = M[i];
        real* ri = R[i];
        real v1i = v_1[i];
        for (int j=0;j<M.width();j++)
            ri[j] += alpha*(mi[j] - v1i * v_2[j]);
    }
}
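// Gradients accumulated in bprop() (d NLL_j / d parameter, with z_j the
// centered neighbor and Sigma as above):
//
//     dNLL_j/dF  = F Sigma^-1 - (F Sigma^-1 z_j)(Sigma^-1 z_j)'
//     dNLL_j/dmu = -Sigma^-1 z_j       (only for the first mu_nneighbors neighbors)
//     dNLL_j/dsn = 0.5*( tr(Sigma^-1) - ||Sigma^-1 z_j||^2 )
//
// Each contribution is scaled by coef = exp(-log_L), i.e. 1/L when log_L is the
// log of the number of mixture components, consistent with this Gaussian being
// one component of an L-component mixture (see the class help above).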
void NllGeneralGaussianVariable::bprop()
{
    real coef = exp(-log_L);
    for(int neighbor=0; neighbor<nneighbors; neighbor++)
    {
        // dNLL/dF

        product(temp_ncomp,F,inv_Sigma_z(neighbor));
        bprop_to_bases(varray[0]->matGradient,inv_Sigma_F,
                       temp_ncomp,inv_Sigma_z(neighbor),
                       gradient[neighbor]*coef);

        if(use_mu && neighbor < mu_nneighbors)
        {
            // dNLL/dmu

            multiplyAcc(varray[1]->gradient, inv_Sigma_z(neighbor),
                        -1.0*gradient[neighbor]*coef);
        }

        // dNLL/dsn

        varray[2]->gradient[0] += gradient[neighbor]*coef*
            0.5*(tr_inv_Sigma - pownorm(inv_Sigma_z(neighbor)));
    }
}


void NllGeneralGaussianVariable::symbolicBprop()
{
    PLERROR("In NllGeneralGaussianVariable::symbolicBprop(): Not implemented");
}

void NllGeneralGaussianVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    NaryVariable::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(input, copies);
    deepCopyField(neighbors, copies);
    deepCopyField(diff_neighbor_input, copies);
    deepCopyField(mu, copies);
    deepCopyField(sm_svd, copies);
    deepCopyField(sn, copies);
    deepCopyField(S, copies);
    deepCopyField(uk, copies);
    deepCopyField(fk, copies);
    deepCopyField(uk2, copies);
    deepCopyField(inv_sigma_zj, copies);
    deepCopyField(zj, copies);
    deepCopyField(inv_sigma_fk, copies);
    deepCopyField(temp_ncomp, copies);
    deepCopyField(F, copies);
    deepCopyField(F_copy, copies);
    deepCopyField(z, copies);
    deepCopyField(U, copies);
    deepCopyField(Ut, copies);
    deepCopyField(V, copies);
    deepCopyField(inv_Sigma_F, copies);
    deepCopyField(inv_Sigma_z, copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :