// -*- C++ -*-

// KNNClassifier.cc
//
// Copyright (C) 2004 Nicolas Chapados
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: KNNClassifier.cc 9720 2008-11-25 17:01:21Z tihocan $
 ******************************************************* */

// Authors: Nicolas Chapados

#include "KNNClassifier.h"
#include <assert.h>
#include <math.h>
#include <plearn_learners/nearest_neighbors/ExhaustiveNearestNeighbors.h>
#include <plearn/ker/GaussianKernel.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    KNNClassifier,
    "Classical K-Nearest-Neighbors classification algorithm",
    "This class provides a simple N-class classifier based upon an enclosed\n"
    "K-nearest-neighbors finder (derived from GenericNearestNeighbors;\n"
00058     "specified with the 'knn' option).  The target variable (the class), is\n"
00059     "assumed to be coded an integer variable (the class number, from 0 to\n"
00060     "C-1, where C is the number of classes); the number of classes is\n"
00061     "specified with the option 'nclasses'. The structure of the learner\n"
00062     "output is a vector of probabilities for each class (even if\n"
00063     "numclasses==2, which is NOT collapsed into a probability of the positive\n"
00064     "class).\n"
    "\n"
    "The class contains several options to determine the number of neighbors\n"
    "to use (K).  This number always overrides the option 'num_neighbors'\n"
    "that may have been specified in the GenericNearestNeighbors utility\n"
    "object.  Basically, the generic formula for the number of neighbors is\n"
    "\n"
    "    K = max(kmin, kmult*(n^kpow)),\n"
    "\n"
    "where 'kmin', 'kmult', and 'kpow' are options, and 'n' is the number of\n"
    "examples in the training set.\n"
    "\n"
    "The costs output from this class are:\n"
    "\n"
00078     "- 'class_error', the classification error, i.e.\n"
00079     "      classerror = max_i output[i] != target\n"
    "\n"
    "- 'neglogprob', the total negative log-probability of target, i.e.\n"
    "      neglogprob = -log(output[target])\n"
    "\n"
    "If the option 'use_knn_costs_as_weights' is true (by default), it is\n"
    "assumed that the costs coming from the 'knn' object are kernel\n"
    "evaluations for each nearest neighbor.  These are used as weights to\n"
    "determine the final class probabilities.  (NOTE: it is important to use\n"
    "a kernel that computes a SIMILARITY MEASURE, and not a DISTANCE MEASURE;\n"
    "the default GaussianKernel has the proper behavior.)  If the option\n"
    "is false, an equal weighting is used (equivalent to a rectangular\n"
    "window).\n"
    "\n"
    "The weights originally present in the training set ARE TAKEN INTO\n"
    "ACCOUNT when weighting each observation: they serve to multiply the\n"
    "kernel values to give the effective weight for an observation.\n"
    );
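
// A worked example of the neighbor-count formula above (an illustrative
// comment, not part of the original source): with kmin=5, kmult=1.0 and
// kpow=0.5, a training set of n=10000 examples gives
//
//     K = max(5, int(1.0 * 10000^0.5)) = max(5, 100) = 100,
//
// i.e. K grows as the square root of the training set size, but never
// drops below kmin.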

KNNClassifier::KNNClassifier()
    :
      nclasses(-1),
      kmin(5),
      kmult(0.0),
      kpow(0.5),
      use_knn_costs_as_weights(true),
      kernel()
{ }

void KNNClassifier::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "knn", &KNNClassifier::knn, OptionBase::buildoption,
        "The K-nearest-neighbors finder to use (default is an\n"
        "ExhaustiveNearestNeighbors with a GaussianKernel, sigma=1)");

    declareOption(
        ol, "nclasses", &KNNClassifier::nclasses, OptionBase::buildoption,
        "Number of classes in the problem.  MUST be specified.");

    declareOption(
        ol, "kmin", &KNNClassifier::kmin, OptionBase::buildoption,
        "Minimum number of neighbors to use (default=5)");

    declareOption(
        ol, "kmult", &KNNClassifier::kmult, OptionBase::buildoption,
        "Multiplicative factor on n^kpow to determine number of neighbors to\n"
        "use (default=0)");

    declareOption(
        ol, "kpow", &KNNClassifier::kpow, OptionBase::buildoption,
        "Power of the number of training examples to determine number of\n"
        "neighbors (default=0.5)");

    declareOption(
        ol, "use_knn_costs_as_weights", &KNNClassifier::use_knn_costs_as_weights,
        OptionBase::buildoption,
        "Whether to weigh each of the K neighbors by the kernel evaluations,\n"
        "obtained from the costs coming out of the 'knn' object (default=true)");

    declareOption(
        ol, "kernel", &KNNClassifier::kernel, OptionBase::buildoption,
        "Disregard the 'use_knn_costs_as_weights' option, and use this kernel\n"
        "to weight the observations.  If this object is not specified\n"
        "(default) and 'use_knn_costs_as_weights' is false, the rectangular\n"
        "kernel is used.");

    declareOption(
        ol, "multi_k", &KNNClassifier::multi_k, OptionBase::buildoption,
        "This can be used if you wish to simultaneously compute the costs for\n"
        "several values of k, efficiently, while doing the neighbor search a\n"
        "single time.  Specify, in increasing order, the values of k (the\n"
        "number of neighbors) you are interested in.  This will result in\n"
        "computing and making available extra costs in addition to\n"
        "'class_error' and 'neglogprob': for each such specified k, there\n"
        "will be a 'class_error_k' and a 'neglogprob_k'.\n"
        "Note that these will however only be computed correctly for values\n"
        "of k that are less than or equal to the global K determined by the\n"
        "other options, so if you specify a multi_k list, you should probably\n"
        "set kmin to the last and largest k of the list.\n"
        "On a technical note, these costs will be computed correctly only if\n"
        "the call to computeCostsFromOutputs follows the computeOutput\n"
        "corresponding to the same input (this is usually the case, and a\n"
        "warning is issued if it isn't).");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
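
// A hypothetical usage sketch (not part of the original source): assuming
// the usual PLearn script convention of Object(option = value;), a
// classifier reading the options declared above might be written as
//
//     KNNClassifier(
//         nclasses = 3;          # mandatory: number of classes
//         kmin = 10;             # use at least 10 neighbors
//         kmult = 1.0;           # plus sqrt(n) neighbors, ...
//         kpow = 0.5;            # ... since K = max(kmin, kmult*n^kpow)
//         multi_k = [ 1 5 10 ];  # also report costs for k = 1, 5 and 10
//     )
//
// The option names match the declareOptions() calls above; the exact
// script syntax is an assumption, not taken from this file.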

void KNNClassifier::build_()
{
    if (!knn)
        knn = new ExhaustiveNearestNeighbors(new GaussianKernel(), false);

    if (nclasses <= 1)
        PLERROR("KNNClassifier::build_: the 'nclasses' option must be specified and >= 2");

    if (kmin <= 0)
        PLERROR("KNNClassifier::build_: the 'kmin' option must be strictly positive");
    for (int k=0; k<multi_k.length()-1; k++)
        if (multi_k[k] > multi_k[k+1])
            PLERROR("KNNClassifier::build_: values in option 'multi_k' must be in increasing order");
}

// ### Nothing to add here, simply calls build_
void KNNClassifier::build()
{
    inherited::build();
    build_();
}


void KNNClassifier::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    deepCopyField(knn_output,     copies);
    deepCopyField(knn_costs,      copies);
    deepCopyField(class_weights,  copies);
    deepCopyField(multi_k_output, copies);
    deepCopyField(multi_k_input,  copies);
    deepCopyField(knn,            copies);
    deepCopyField(kernel,         copies);
    deepCopyField(multi_k,        copies);
    inherited::makeDeepCopyFromShallowCopy(copies);
}


int KNNClassifier::outputsize() const
{
    return nclasses;
}


void KNNClassifier::setTrainingSet(VMat training_set, bool call_forget)
{
    PLASSERT( knn );
    inherited::setTrainingSet(training_set, call_forget);

    // Now we carry out a little bit of tweaking on the embedded knn:
    // - ask it to copy the target and weight for each neighbor (and the
    //   input too, if a user-specified kernel must be evaluated here)
    // - set the number of neighbors
    // - set the training set (which performs a build if necessary)
    int n = training_set.length();
    int num_neighbors = max(kmin, int(kmult*pow(double(n),double(kpow))));
    knn->num_neighbors = num_neighbors;
    knn->copy_input  = kernel.isNotNull();
    knn->copy_target = true;
    knn->copy_weight = true;
    knn->copy_index  = false;
    knn->setTrainingSet(training_set, call_forget);
    knn_costs.resize(num_neighbors); // Changed for compatibility with HyperLearner
    //knn_costs.resize(knn->nTestCosts());
    knn_output.resize(knn->outputsize());
}

void KNNClassifier::forget()
{
    PLASSERT( knn );
    knn->forget();
}

void KNNClassifier::train()
{
    PLASSERT( knn );
    knn->distance_kernel->train(train_set);
    knn->train();
}

void KNNClassifier::computeOutput(const Vec& input, Vec& output) const
{
    output.resize(outputsize());

    // A user-specified kernel complicates the situation: in that case the
    // knn output also contains each neighbor's input (copy_input was set in
    // setTrainingSet), which is needed here to evaluate the kernel.
    const int inputsize = input.size();
    Vec knn_targets;
    knn->computeOutputAndCosts(input, knn_targets, knn_output, knn_costs);
    real* output_data = knn_output.data();

    int n_multi_k = multi_k.length();
    if (n_multi_k > 0)
    {
        // First remember the input, so that computeCostsFromOutputs can
        // verify that it is called on the same input as this computeOutput.
        multi_k_input.resize(input.length());
        multi_k_input << input;
        // Then initialize the multi_k_output matrix.
        multi_k_output.resize(n_multi_k, outputsize());
        multi_k_output.fill(0);
        // TODO: need to sort the knn output ?...
    }

    // Cumulate the class weights.  Compute the kernel if it's required.
    class_weights.resize(nclasses);
    class_weights.fill(0.0);
    real total_weight = 0.0;
    for (int i=0, n=knn->num_neighbors, multi_pos=0 ; i<n ; ++i) {
        real w = -1.0;                           // weight of this neighbor
        if (kernel) {
            // The neighbor's input was copied into knn_output; evaluate the
            // user-specified kernel against the test input.
            Vec cur_input(inputsize, output_data);
            w = kernel(cur_input, input);
            output_data += inputsize;
        }
        else if (use_knn_costs_as_weights)
            w = knn_costs[i];
        else
            w = 1.0;
        int nn_class = int(*output_data++);
        if (nn_class < 0 || nn_class >= nclasses)
            PLERROR("KNNClassifier::computeOutput: expected the class to be between 0 "
                    "and %d but found %d", nclasses-1, nn_class);
        w *= *output_data++;                     // multiply by the train-set weight
        PLASSERT( w >= 0.0 );
        class_weights[nn_class] += w;
        total_weight += w;
        if (multi_pos<n_multi_k && multi_k[multi_pos]==i+1) // keep the output for k==i+1
        {
            if (total_weight >= 1e-6)
            {
                Vec output_k = multi_k_output(multi_pos);
                output_k << class_weights;
                output_k *= 1/total_weight;
            }
            ++multi_pos;
        }
    }

    // If the total weight is too small, output zero probability for all classes
    if (total_weight < 1e-6) {
        output.fill(0.0);
        return;
    }

    // Now compute probabilities
    for (int i=0, n = nclasses ; i<n ; ++i)
        class_weights[i] /= total_weight;

    // And output them
    copy(class_weights.begin(), class_weights.end(), output.begin());
}
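
// A worked example of the weighting above (an illustrative comment, not
// part of the original source): with nclasses=2, K=3 neighbors of classes
// (0, 1, 0), kernel weights (0.9, 0.5, 0.2) and unit train-set weights,
//
//     class_weights = [0.9 + 0.2, 0.5] = [1.1, 0.5],
//     total_weight  = 1.6,
//
// so the output probabilities are [0.6875, 0.3125].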

void KNNClassifier::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                            const Vec& target, Vec& costs) const
{
    int n_multi_k = multi_k.length();
    costs.resize(2*(1+n_multi_k));
    int t = int(target[0]);
    int sel_class = argmax(output);
    costs[0] = sel_class != t;
    costs[1] = -pl_log(1e-10+output[t]);

    if (n_multi_k>0 && input!=multi_k_input)
        PLWARNING("In computeCostsFromOutputs: input appears different from multi_k_input. "
                  "This probably means that computeOutput was called on a different input "
                  "before calling computeCostsFromOutputs. As a consequence, the extra costs "
                  "requested through the multi_k option will be incorrect");

    for (int k=0; k<n_multi_k; k++)
    {
        Vec output_k = multi_k_output(k);
        int sel_class = argmax(output_k);
        costs[2+2*k] = sel_class != t;
        costs[3+2*k] = -pl_log(1e-10+output_k[t]);
    }
}
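
// Cost layout example (an illustrative comment, not part of the original
// source): with multi_k = [1, 5], getTestCostNames() below returns
//
//     [ class_error, neglogprob,
//       class_error_1, neglogprob_1,
//       class_error_5, neglogprob_5 ]
//
// and computeCostsFromOutputs() fills the costs vector in the same order.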

TVec<string> KNNClassifier::getTestCostNames() const
{
    int n_multi_k = multi_k.length();
    TVec<string> costs(2*(1+n_multi_k));
    costs[0] = "class_error";
    costs[1] = "neglogprob";
    for (int k=0; k<n_multi_k; k++)
    {
        string kstr = tostring(multi_k[k]);
        costs[2+2*k] = "class_error_"+kstr;
        costs[3+2*k] = "neglogprob_"+kstr;
    }
    return costs;
}

TVec<string> KNNClassifier::getTrainCostNames() const
{
    return TVec<string>();
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :