// -*- C++ -*-

// LocalGaussianClassifier.cc
//
// Copyright (C) 2007 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Vincent

#include "LocalGaussianClassifier.h"
#include <plearn/math/TMat_maths.h>
#include <plearn/math/distr_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    LocalGaussianClassifier,
    "Memory-based classifier that fits a locally weighted Gaussian per class around each test point.",
    "For each test point, every training sample is weighted by a Gaussian\n"
    "kernel centered on that point. Per-class weighted sums, means and\n"
    "(optionally) covariances are estimated from these weights, and the test\n"
    "point is scored for each class by the log-density of the resulting\n"
    "class-conditional Gaussian plus the log of the local class weight.");

LocalGaussianClassifier::LocalGaussianClassifier()
    :nclasses(-1),
     computation_neighbors(-1),
     kernel_sigma(0.1),
     regularization_sigma(1e-6),
     ignore_weights_below(1e-8),
     minus_one_half_over_kernel_sigma_square(0),
     traintarget_ptr(0),
     trainweight_ptr(0)
{
    // ### You may (or not) want to call build_() to finish building the object
    // ### (doing so assumes the parent classes' build_() have been called too
    // ### in the parent classes' constructors, something that you must ensure)

    // ### If this learner needs to generate random numbers, uncomment the
    // ### line below to enable the use of the inherited PRandom object.
    // random_gen = new PRandom();
}
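
// The decision rule implemented in computeOutput() below, in notation added
// here for clarity (it is not from the original author): for a test input x,
// training pairs (x_i, y_i) and kernel weights
//
//     w_i = exp( -||x - x_i||^2 / (2 * kernel_sigma^2) )
//
// (further multiplied by the sample weight, if the training set carries one),
// the score assigned to class c is
//
//     output[c] = log( sum_{i: y_i=c} w_i )
//                 + log N( x ; mu_c, Sigma_c + regularization_sigma^2 * I )
//
// where mu_c is the weighted mean of class c computed over all training
// points, and Sigma_c is the weighted covariance of class c estimated on the
// 'computation_neighbors' nearest neighbors only (and taken to be zero, i.e.
// purely spherical after regularization, when that option is 0).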
void LocalGaussianClassifier::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    // ### ex:
    // declareOption(ol, "myoption", &LocalGaussianClassifier::myoption,
    //               OptionBase::buildoption,
    //               "Help text describing this option");
    // ...

    declareOption(ol, "nclasses", &LocalGaussianClassifier::nclasses, OptionBase::buildoption,
                  "The number of different classes.\n"
                  "Note that the 'target' part of training set samples must be an integer\n"
                  "with values between 0 and nclasses-1.\n");

    declareOption(ol, "computation_neighbors", &LocalGaussianClassifier::computation_neighbors, OptionBase::buildoption,
                  "The number of neighbors to which we restrict ourselves for the computation\n"
                  "of the covariance matrix only (weight and mean are much cheaper to compute,\n"
                  "so they are always computed using all points).\n"
                  "If =0, no covariance matrix is computed (i.e. a spherical covariance of\n"
                  "width regularization_sigma is used).\n"
                  "If <0, all training points are used (with an appropriate weight).\n"
                  "If >1, only that many neighbors of the test point are considered.\n"
                  "If between 0 and 1, it is a coefficient by which to multiply the square\n"
                  "root of the number of training points, to yield the actual number of\n"
                  "computation neighbors used.\n");

    declareOption(ol, "kernel_sigma", &LocalGaussianClassifier::kernel_sigma, OptionBase::buildoption,
                  "The sigma (standard deviation) of the weighting Gaussian kernel.\n");

    declareOption(ol, "regularization_sigma", &LocalGaussianClassifier::regularization_sigma, OptionBase::buildoption,
                  "This quantity squared is added to the diagonal of the local empirical covariance matrices.\n");

    declareOption(ol, "ignore_weights_below", &LocalGaussianClassifier::ignore_weights_below, OptionBase::buildoption,
                  "Minimal weight below which a point is ignored (i.e. its weight is considered to be 0).\n");

    declareOption(ol, "train_set", &LocalGaussianClassifier::train_set, OptionBase::learntoption,
                  "We need to store the training set, as this learner is memory-based...");

    /*
    declareOption(ol, "NN", &LocalGaussianClassifier::NN, OptionBase::learntoption,
                  "The nearest neighbor algorithm used to find nearest neighbors");
    */

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
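
// A minimal usage sketch (hypothetical names and option values, not from the
// original file; 'trainvm' and 'testinput' stand for a suitable VMat and Vec):
//
//     PP<LocalGaussianClassifier> clf = new LocalGaussianClassifier();
//     clf->nclasses = 3;
//     clf->kernel_sigma = 0.5;
//     clf->computation_neighbors = 0.5;    // ~0.5*sqrt(n) neighbors for covariances
//     clf->build();
//     clf->setTrainingSet(trainvm, true);  // memory-based: this stores the data
//     Vec scores(clf->outputsize());
//     clf->computeOutput(testinput, scores);  // scores[c] = per-class log-score
//     int predicted_class = argmax(scores);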
void LocalGaussianClassifier::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of
    // ###    all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning"
    // ###    options have been modified.
    // ### You should assume that the parent class' build_() has already been
    // ### called.

    // PLASSERT(weighting_kernel.isNotNull());

    if(train_set.isNotNull())
        setTrainingSet(train_set, false);

    minus_one_half_over_kernel_sigma_square = -0.5/(kernel_sigma*kernel_sigma);
}

// ### Nothing to add here, simply calls build_
void LocalGaussianClassifier::build()
{
    inherited::build();
    build_();
}


void LocalGaussianClassifier::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    // deepCopyField(trainvec, copies);
    // deepCopyField(weighting_kernel, copies);
    // deepCopyField(NN, copies);
}


int LocalGaussianClassifier::outputsize() const
{
    return nclasses;
}

void LocalGaussianClassifier::forget()
{
    // Nothing learner-specific to reset: this learner is memory-based.
    inherited::forget();
}

void LocalGaussianClassifier::train()
{
    // Nothing to do here: this learner is memory-based, so all the work
    // happens in setTrainingSet() and computeOutput().

    // The role of the train method is to bring the learner up to
    // stage==nstages, updating train_stats with training costs measured
    // on-line in the process.

    /* TYPICAL CODE:

    static Vec input;  // static so we don't reallocate memory each time...
    static Vec target; // (but be careful that static means shared!)
    input.resize(inputsize());   // the train_set's inputsize()
    target.resize(targetsize()); // the train_set's targetsize()
    real weight;

    // This generic PLearner method does a number of standard things useful for
    // (almost) any learner, and returns 'false' if no training should take
    // place. See PLearner.h for more details.
    if (!initTrain())
        return;

    while(stage<nstages)
    {
        // clear statistics of previous epoch
        train_stats->forget();

        // ... train for 1 stage, and update train_stats,
        // using train_set->getExample(input, target, weight)
        // and train_stats->update(train_costs)

        ++stage;
        train_stats->finalize(); // finalize statistics for this epoch
    }
    */
}
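
// Note on the buffers set up by setTrainingSet() below: 'trainsample' is a
// single row buffer laid out as (input | target | optional weight), with
// 'traininput', 'traintarget_ptr' and 'trainweight_ptr' pointing into it.
// 'allcovars' stacks the nclasses covariance matrices row-wise, and each
// covars[c] is a sub-matrix view into it, so no per-class copies are made.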
void LocalGaussianClassifier::setTrainingSet(VMat training_set, bool call_forget)
{
    inherited::setTrainingSet(training_set, call_forget);

    // int l = train_set.length();
    int is = inputsize();
    int ts = targetsize();
    PLASSERT(ts==1);
    int ws = weightsize();
    PLASSERT(ws==0 || ws==1);
    trainsample.resize(is+ts+ws);
    traininput = trainsample.subVec(0,is);
    traintarget_ptr = &trainsample[is];
    trainweight_ptr = NULL;
    if(ws==1)
        trainweight_ptr = &trainsample[is+ts];

    log_counts.resize(nclasses);
    log_counts2.resize(nclasses);
    means.resize(nclasses, is);
    allcovars.resize(nclasses*is, is);
    covars.resize(nclasses);
    for(int c=0; c<nclasses; c++)
        covars[c] = allcovars.subMatRows(c*is, is);
}

real LocalGaussianClassifier::computeLogWeight(const Vec& input, const Vec& traininput) const
{
    return powdistance(input, traininput, 2.0, true)*minus_one_half_over_kernel_sigma_square;
}
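
// computeLogWeight() above returns the log of an unnormalized Gaussian
// kernel: powdistance with exponent 2.0 yields the sum of squared coordinate
// differences (no square root is taken), which is then scaled by the
// precomputed -1/(2*kernel_sigma^2) factor, giving
// log w_i = -||x - x_i||^2 / (2 * kernel_sigma^2).
//
// computeOutput() below retains the K most heavily weighted training points
// with a max-heap of (negative log-weight, index) pairs: the heap root is the
// weakest retained neighbor, so a new candidate replaces it only when its
// weight is larger (i.e. its negative log-weight is smaller).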
void LocalGaussianClassifier::computeOutput(const Vec& input, Vec& output) const
{
    int l = train_set.length();
    PLASSERT(input.length()==inputsize());

    int K = 0;
    if(computation_neighbors>1)
        K = int(computation_neighbors);
    else if(computation_neighbors>0)
        K = int(computation_neighbors*sqrt(l));
    else if(computation_neighbors<0)
        K = l;
    if(K>l)
        K = l;

    pqvec.resize(K+1);
    pair<real,int>* pq = pqvec.begin();
    int pqsize = 0;

    log_counts.fill(-FLT_MAX);
    if(K>0)
        log_counts2.fill(-FLT_MAX);

    if(verbosity>=3)
        perr << "______________________________________" << endl;
    means.clear();
    real ignore_log_weights_below = pl_log(ignore_weights_below);

    for(int i=0; i<l; i++)
    {
        train_set->getRow(i,trainsample);
        real log_w = computeLogWeight(input, traininput);
        if(trainweight_ptr)
            log_w += pl_log(*trainweight_ptr);
        if(log_w>=ignore_log_weights_below)
        {
            if(K>0)
            {
                real d = -log_w;
                if(pqsize<K)
                {
                    pq[pqsize++] = pair<real,int>(d,i);
                    if(K<l) // need to maintain heap structure only if K<l
                        push_heap(pq,pq+pqsize);
                }
                else if(d<pq->first)
                {
                    pop_heap(pq,pq+pqsize);
                    pq[pqsize-1] = pair<real,int>(d,i);
                    push_heap(pq,pq+pqsize);
                }
            }
            int c = int(*traintarget_ptr);
            real lcc = log_counts[c];
            log_counts[c] = (lcc<ignore_log_weights_below ?log_w :logadd(lcc, log_w));
            multiplyAcc(means(c), traininput, exp(log_w));
        }
    }

    if(verbosity>=3)
        perr << "log_counts: " << log_counts << endl;

    for(int c=0; c<nclasses; c++)
        if(log_counts[c]>=ignore_log_weights_below)
            means(c) *= exp(-log_counts[c]);

    allcovars.fill(0.);
    if(K>0) // compute covars?
    {
        for(int k=0; k<pqsize; k++)
        {
            int i = pq[k].second;
            real log_w = -pq[k].first;
            train_set->getRow(i,trainsample);
            int c = int(*traintarget_ptr);
            real lcc = log_counts2[c];
            log_counts2[c] = (lcc<ignore_log_weights_below ?log_w :logadd(lcc, log_w));
            traininput -= means(c);
            externalProductScaleAcc(covars[c], traininput, traininput, exp(log_w));
        }

        for(int c=0; c<nclasses; c++)
            if(log_counts2[c]>=ignore_log_weights_below)
                covars[c] *= exp(-log_counts2[c]);
        if(verbosity>=3)
            perr << "log_counts2: " << log_counts2 << endl;
    }

    output.resize(nclasses);
    output.clear();

    for(int c=0; c<nclasses; c++)
    {
        if(log_counts[c]<ignore_log_weights_below)
            output[c] = -FLT_MAX;
        else
        {
            Mat cov = covars[c];
            addToDiagonal(cov, square(regularization_sigma));
            real log_p_x = logOfNormal(input, means(c), cov);
            output[c] = log_p_x + log_counts[c];
            if(verbosity>=4)
            {
                perr << "** Class " << c << " **" << endl;
                perr << "log_p_x: " << log_p_x << endl;
                perr << "log_count: " << log_counts[c] << endl;
                perr << "mean: " << means(c) << endl;
                perr << "regularized covar: \n" << cov << endl;
            }
        }
    }
    if(verbosity>=2)
    {
        perr << "Scores: " << output << endl;
        perr << "argmax: " << argmax(output) << endl;
    }
}

void LocalGaussianClassifier::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                      const Vec& target, Vec& costs) const
{
    costs.resize(2);
    int c = int(target[0]);
    costs[0] = (argmax(output)==c ?0.0 :1.0);
    costs[1] = logadd(output)-output[c];
}

TVec<string> LocalGaussianClassifier::getTestCostNames() const
{
    TVec<string> names(2);
    names[0] = "class_error";
    names[1] = "NLL";
    return names;
}

TVec<string> LocalGaussianClassifier::getTrainCostNames() const
{
    TVec<string> names;
    return names;
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :