// -*- C++ -*-

// LocallyMagnifiedDistribution.cc
//
// Copyright (C) 2005 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: LocallyMagnifiedDistribution.cc 8447 2008-02-02 15:01:03Z plearner $
 ******************************************************* */

// Authors: Pascal Vincent

#include "LocallyMagnifiedDistribution.h"
#include <plearn/vmat/ConcatColumnsVMatrix.h>
#include <plearn/vmat/MemoryVMatrix.h>
#include <plearn_learners/nearest_neighbors/ExhaustiveNearestNeighbors.h>
#include <plearn/base/tostring.h>
#include <plearn_learners/distributions/GaussianDistribution.h>

namespace PLearn {
using namespace std;

//////////////////////////////////
// LocallyMagnifiedDistribution //
//////////////////////////////////
LocallyMagnifiedDistribution::LocallyMagnifiedDistribution()
    :display_adapted_width(true),
     mode(0),
     computation_neighbors(-1),
     kernel_adapt_width_mode(' '),
     fix_localdistr_center(true),
     width_neighbors(1.0),
     width_factor(1.0),
     width_optionname("sigma")
{
}

PLEARN_IMPLEMENT_OBJECT(LocallyMagnifiedDistribution,
                        "Density estimation by fitting a local model (specified by localdistr) to a view of the training samples, magnified locally around the test point.",
                        ""
    );
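// Illustrative PLearn-script specification of this learner (a sketch added
// for documentation; it is not part of the original file, and the
// GaussianKernel class and all option values shown are assumptions):
//
//   LocallyMagnifiedDistribution(
//       mode = 0;
//       computation_neighbors = 0.5;     # use 0.5*sqrt(l) neighbors
//       weighting_kernel = GaussianKernel(sigma = 0.1);
//       kernel_adapt_width_mode = 'A';   # width = avg distance to neighbors
//       width_neighbors = 1.0;           # 1.0*sqrt(l) neighbors for width
//       width_factor = 1.0;
//       width_optionname = "sigma";      # kernel option that holds the width
//   )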
////////////////////
// declareOptions //
////////////////////
void LocallyMagnifiedDistribution::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    declareOption(ol, "mode", &LocallyMagnifiedDistribution::mode, OptionBase::buildoption,
                  "Output computation mode:\n"
                  "0: log-density of the locally trained model, plus log(weightsum/l), minus log(K(x,x))\n"
                  "1: log-density of the locally trained model only\n"
                  "2: log(weightsum/l)\n"
                  "3: log(weightsum)\n"
                  "4: log-density of the locally trained model, plus log(width_n/l)");

    declareOption(ol, "computation_neighbors", &LocallyMagnifiedDistribution::computation_neighbors, OptionBase::buildoption,
                  "This indicates to how many neighbors we should restrict ourselves for the computations\n"
                  "(it's equivalent to giving all other data points a weight of 0).\n"
                  "If <=0 we use all training points (with an appropriate weight).\n"
                  "If >1 we consider only that many neighbors of the test point.\n"
                  "If between 0 and 1, it's considered a coefficient by which to multiply\n"
                  "the square root of the number of training points, to yield the actual\n"
                  "number of computation neighbors used.");

    declareOption(ol, "weighting_kernel", &LocallyMagnifiedDistribution::weighting_kernel, OptionBase::buildoption,
                  "The magnifying kernel that will be used to locally weigh the samples.\n"
                  "If it is left null then all computation_neighbors will receive a weight of 1.\n");

    declareOption(ol, "kernel_adapt_width_mode", &LocallyMagnifiedDistribution::kernel_adapt_width_mode, OptionBase::buildoption,
                  "This controls how we adapt the width of the kernel to the local neighborhood of the test point.\n"
                  "' ' means leave width unchanged.\n"
                  "'A' means set the width to width_factor times the average distance to the neighbors determined by width_neighbors.\n"
                  "'M' means set the width to width_factor times the maximum distance to the neighbors determined by width_neighbors.\n"
                  "'Z' means set the width to width_factor times the maximum distance to the neighbors, divided by sqrt(inputsize).\n");

    declareOption(ol, "width_neighbors", &LocallyMagnifiedDistribution::width_neighbors, OptionBase::buildoption,
                  "width_neighbors tells how many neighbors to consider to determine the kernel width\n"
                  "(see kernel_adapt_width_mode).\n"
                  "If width_neighbors>1 we consider that many neighbors.\n"
                  "If width_neighbors>=0 and <=1 it's considered a coefficient by which to multiply\n"
                  "the square root of the number of training points, to yield the actual\n"
                  "number of neighbors used.");

    declareOption(ol, "width_factor", &LocallyMagnifiedDistribution::width_factor, OptionBase::buildoption,
                  "Only used if width_neighbors>0 (see width_neighbors).");

    declareOption(ol, "width_optionname", &LocallyMagnifiedDistribution::width_optionname, OptionBase::buildoption,
                  "Only used if kernel_adapt_width_mode!=' '. The name of the option in the weighting kernel\n"
                  "that should be used to set or modify its width.");
    declareOption(ol, "localdistr", &LocallyMagnifiedDistribution::localdistr, OptionBase::buildoption,
                  "The kind of distribution that will be trained with local weights obtained from the magnifying kernel.\n"
                  "If left unspecified (null), it will be set to GaussianDistribution by default.");

    declareOption(ol, "fix_localdistr_center", &LocallyMagnifiedDistribution::fix_localdistr_center, OptionBase::buildoption,
                  "If true, and localdistr is a GaussianDistribution, then the mu of the localdistr will be forced to be the given test point.");

    declareOption(ol, "train_set", &LocallyMagnifiedDistribution::train_set, OptionBase::learntoption,
                  "We need to store the training set, as this learner is memory-based...");

    declareOption(ol, "NN", &LocallyMagnifiedDistribution::NN, OptionBase::learntoption,
                  "The nearest neighbor algorithm used to find nearest neighbors");

    // Now call the parent class' declareOptions().
    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void LocallyMagnifiedDistribution::build()
{
    // ### Nothing to add here, simply calls build_().
    inherited::build();
    build_();
}
////////////
// build_ //
////////////
void LocallyMagnifiedDistribution::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning" options have been modified.
    // ### You should assume that the parent class' build_() has already been called.

    // ### If the distribution is conditional, you should finish build_() by:
    // PDistribution::finishConditionalBuild();

    if(localdistr.isNull())
    {
        // Default local model: a Gaussian fitted to the locally weighted samples.
        GaussianDistribution* distr = new GaussianDistribution();
        distr->ignore_weights_below = 1e-6;
        distr->build();
        localdistr = distr;
    }
}
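// Overview of the density computation (summary comment added for clarity,
// derived from the code below): to evaluate the density at a test point y,
// we 1) optionally find the nearest neighbors of y in the training set,
// 2) optionally adapt the weighting kernel's width to the local neighborhood
// (see kernel_adapt_width_mode), 3) build a locally weighted view of the
// training set in which each sample x_i receives weight K(y, x_i), and
// 4) train localdistr on that weighted view and evaluate its log-density
// at y, combining it with the weights according to 'mode'.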
/////////////////
// log_density //
/////////////////
real LocallyMagnifiedDistribution::log_density(const Vec& y) const
{
    int l = train_set.length();
    int w = inputsize();
    int ws = train_set->weightsize();
    trainsample.resize(w+ws);
    Vec input = trainsample.subVec(0,w);

    PLASSERT(targetsize()==0);

    int comp_n = getActualNComputationNeighbors();
    int width_n = getActualNWidthNeighbors();

    if(comp_n>0 || width_n>0)
        NN->computeOutputAndCosts(y, emptyvec, NN_outputs, NN_costs);

    if(kernel_adapt_width_mode!=' ')
    {
        real new_width = 0;
        if(kernel_adapt_width_mode=='M')
        {
            new_width = width_factor*NN_costs[width_n-1];
            // if(display_adapted_width)
            //     perr << "new_width=" << width_factor << " * NN_costs["<<width_n-1<<"] = "<< new_width << endl;
        }
        else if(kernel_adapt_width_mode=='Z')
        {
            new_width = width_factor*sqrt(square(NN_costs[width_n-1])/w);
        }
        else if(kernel_adapt_width_mode=='A')
        {
            for(int k=0; k<width_n; k++)
                new_width += NN_costs[k];
            new_width *= width_factor/width_n;
        }
        else
            PLERROR("Invalid kernel_adapt_width_mode: %c",kernel_adapt_width_mode);

        // hack to display only first adapted width
        if(display_adapted_width)
        {
            /*
            perr << "NN_outputs = " << NN_outputs << endl;
            perr << "NN_costs = " << NN_costs << endl;
            perr << "inputsize = " << w << endl;
            perr << "length = " << l << endl;
            */
            perr << "Adapted kernel width = " << new_width << endl;
            display_adapted_width = false;
        }

        weighting_kernel->setOption(width_optionname,tostring(new_width));
        weighting_kernel->build(); // rebuild to adapt to width change
    }

    double weightsum = 0;

    VMat local_trainset;
    if(comp_n>0) // we'll use only the neighbors
    {
        int n = NN_outputs.length();
        Mat neighbors(n, w+1);
        neighbors.lastColumn().fill(1.0); // default weight 1.0
        for(int k=0; k<n; k++)
        {
            Vec neighbors_k = neighbors(k);
            Vec neighbors_row = neighbors_k.subVec(0,w+ws);
            Vec neighbors_input = neighbors_row.subVec(0,w);
            train_set->getRow(int(NN_outputs[k]),neighbors_row);
            real weight = 1.;
            if(weighting_kernel.isNotNull())
                weight = weighting_kernel(y,neighbors_input);
            weightsum += weight;
            neighbors_k[w] *= weight;
        }
        local_trainset = new MemoryVMatrix(neighbors);
        local_trainset->defineSizes(w,0,1);
    }
    else // we'll use all the points
    {
        // 'weights' will contain the "localization" weights for the current test point.
        weights.resize(l);
        for(int i=0; i<l; i++)
        {
            train_set->getRow(i,trainsample);
            real weight = 1.;
            if(weighting_kernel.isNotNull())
                weight = weighting_kernel(y,input);
            if(ws==1)
                weight *= trainsample[w];
            weightsum += weight;
            weights[i] = weight;
        }

        VMat weight_column(columnmatrix(weights));
        if(ws==0) // append weight column
            local_trainset = hconcat(train_set, weight_column);
        else // replace last column by weight column
            local_trainset = hconcat(train_set.subMatColumns(0,w), weight_column);
        local_trainset->defineSizes(w,0,1);
    }

    // perr << "local_trainset =" << endl << local_trainset->toMat() << endl;
    double log_local_p = 0;

    switch(mode)
    {
    case 0:
        log_local_p = trainLocalDistrAndEvaluateLogDensity(local_trainset, y);
        // Normalize by the training-set size and by the kernel's value at zero
        // distance (K(x,x) is a constant for translation-invariant kernels).
        return log_local_p + pl_log((double)weightsum) - pl_log((double)l) - pl_log((double)weighting_kernel(input,input));
    case 1:
        log_local_p = trainLocalDistrAndEvaluateLogDensity(local_trainset, y);
        return log_local_p;
    case 2:
        return pl_log((double)weightsum) - pl_log((double)l);
    case 3:
        return pl_log((double)weightsum);
    case 4:
        log_local_p = trainLocalDistrAndEvaluateLogDensity(local_trainset, y);
        return log_local_p+pl_log((double)width_n)-pl_log((double)l);
    default:
        PLERROR("Invalid mode %d", mode);
        return 0;
    }
}

double LocallyMagnifiedDistribution::trainLocalDistrAndEvaluateLogDensity(VMat local_trainset, Vec y) const
{
    if(fix_localdistr_center)
    {
        GaussianDistribution* distr = dynamic_cast<GaussianDistribution*>((PDistribution*)localdistr);
        if(distr!=0)
            distr->given_mu = y;
    }
    localdistr->forget();
    localdistr->setTrainingSet(local_trainset);
    localdistr->train();
    double log_local_p = localdistr->log_density(y);
    return log_local_p;
}


/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void LocallyMagnifiedDistribution::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    // deepCopyField(trainvec, copies);
    deepCopyField(weighting_kernel, copies);
    deepCopyField(localdistr, copies);
    deepCopyField(NN, copies);
}

int LocallyMagnifiedDistribution::getActualNComputationNeighbors() const
{
    if(computation_neighbors<=0)
        return 0;
    else if(computation_neighbors>1)
        return int(computation_neighbors);
    else
        return int(computation_neighbors*sqrt(train_set->length()));
}

int LocallyMagnifiedDistribution::getActualNWidthNeighbors() const
{
    if(width_neighbors<0)
        return 0;
    else if(width_neighbors>1)
        return int(width_neighbors);
    return int(width_neighbors*sqrt(train_set->length()));
}
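// Worked example of the neighbor-count arithmetic above (illustration added
// for clarity; the numbers are hypothetical): with a training set of length
// 10000 and computation_neighbors=0.25, the actual number of computation
// neighbors is int(0.25*sqrt(10000)) = 25; with computation_neighbors=50 it
// is simply 50; and with computation_neighbors=-1 all 10000 training points
// are used (each weighted by the kernel). width_neighbors works the same way.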
// ### Remove this method, if your distribution does not implement it.

///////////
// train //
///////////
void LocallyMagnifiedDistribution::train()
{
    int comp_n = getActualNComputationNeighbors();
    int width_n = getActualNWidthNeighbors();
    int actual_nneighbors = max(comp_n, width_n);

    if(train_set.isNotNull())
        actual_nneighbors = min(actual_nneighbors, train_set.length());

    if(actual_nneighbors>0)
    {
        NN = new ExhaustiveNearestNeighbors(); // for now use exhaustive search and default Euclidean distance
        NN->num_neighbors = actual_nneighbors;
        NN->copy_input = false;
        NN->copy_target = false;
        NN->copy_weight = false;
        NN->copy_index = true;
        NN->build();
        if(train_set.isNotNull())
        {
            NN->setTrainingSet(train_set);
            NN->train();
        }
        NN_outputs.resize(actual_nneighbors);
        NN_costs.resize(actual_nneighbors);
    }
}


void LocallyMagnifiedDistribution::forget()
{
    if(NN.isNotNull())
        NN->forget();
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :