// -*- C++ -*-

// KNNRegressor.cc
//
// Copyright (C) 2004 Nicolas Chapados
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: KNNRegressor.cc 7042 2007-05-09 23:44:20Z saintmlx $
 ******************************************************* */

// Authors: Nicolas Chapados

#include "KNNRegressor.h"
#include <assert.h>
#include <math.h>

#include <plearn/base/tostring.h>
#include <plearn/math/TMat_maths.h>
#include <plearn_learners/nearest_neighbors/ExhaustiveNearestNeighbors.h>
#include <plearn/ker/EpanechnikovKernel.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    KNNRegressor,
    "Implementation of the Nadaraya-Watson kernel estimator for regression",
    "This class provides a simple multivariate regressor based upon an\n"
    "enclosed K-nearest-neighbors finder (derived from\n"
    "GenericNearestNeighbors; specified with the 'knn' option).\n"
    "\n"
    "The class contains several options to determine the number of neighbors\n"
    "to use (K).  This number always overrides the option 'num_neighbors'\n"
    "that may have been specified in the GenericNearestNeighbors utility\n"
    "object.  Basically, the generic formula for the number of neighbors is\n"
    "\n"
    "    K = max(kmin, kmult*(n^kpow)),\n"
    "\n"
    "where 'kmin', 'kmult', and 'kpow' are options, and 'n' is the number of\n"
    "examples in the training set.\n"
    "\n"
    "The cost output from this class is:\n"
    "\n"
    "- 'mse', the mean-squared error, i.e. given an output o and target t,\n"
    "      mse(o,t) = sum_i (o[i]-t[i])^2.\n"
    "\n"
    "If the option 'use_knn_costs_as_weights' is true (the default), it is\n"
    "assumed that the costs coming from the 'knn' object are kernel\n"
    "evaluations for each nearest neighbor.  These are used as weights to\n"
    "compute the final regression output.  (NOTE: it is important to use\n"
    "a kernel that computes a SIMILARITY MEASURE, and not a DISTANCE MEASURE;\n"
    "the default EpanechnikovKernel has the proper behavior.)  If the option\n"
    "is false, an equal weighting is used (equivalent to a rectangular\n"
    "window).  In addition, a different weighting kernel may be specified\n"
    "with the 'kernel' option.\n"
    "\n"
    "A local weighted regression model may be trained at each test point\n"
    "by specifying a 'local_model'.  For instance, to perform local linear\n"
    "regression, you may use a LinearRegressor for this purpose.\n"
    );
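
// Worked example of the neighbor-count formula above (illustrative numbers,
// not from the original source): with the defaults kmin=5, kmult=0, kpow=0.5,
// K = max(5, 0*n^0.5) = kmin = 5 for any n; with kmult=1 instead, a training
// set of n=400 examples gives K = max(5, 400^0.5) = max(5, 20) = 20 neighbors.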

KNNRegressor::KNNRegressor()
    : knn(new ExhaustiveNearestNeighbors(new EpanechnikovKernel(), false)),
      kmin(5),
      kmult(0.0),
      kpow(0.5),
      use_knn_costs_as_weights(true),
      kernel(),
      local_model()
{ }

void KNNRegressor::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "knn", &KNNRegressor::knn, OptionBase::buildoption,
        "The K-nearest-neighbors finder to use (default is an\n"
        "ExhaustiveNearestNeighbors with an EpanechnikovKernel, lambda=1)");

    declareOption(
        ol, "kmin", &KNNRegressor::kmin, OptionBase::buildoption,
        "Minimum number of neighbors to use (default=5)");

    declareOption(
        ol, "kmult", &KNNRegressor::kmult, OptionBase::buildoption,
        "Multiplicative factor on n^kpow to determine number of neighbors to\n"
        "use (default=0)");

    declareOption(
        ol, "kpow", &KNNRegressor::kpow, OptionBase::buildoption,
        "Power of the number of training examples to determine number of\n"
        "neighbors (default=0.5)");

    declareOption(
        ol, "use_knn_costs_as_weights", &KNNRegressor::use_knn_costs_as_weights,
        OptionBase::buildoption,
        "Whether to weight each of the K neighbors by the kernel evaluations,\n"
        "obtained from the costs coming out of the 'knn' object (default=true)");

    declareOption(
        ol, "kernel", &KNNRegressor::kernel, OptionBase::buildoption,
        "Disregard the 'use_knn_costs_as_weights' option, and use this kernel\n"
        "to weight the observations.  If this object is not specified\n"
        "(default), and 'use_knn_costs_as_weights' is false, the\n"
        "rectangular kernel is used.");

    declareOption(
        ol, "local_model", &KNNRegressor::local_model, OptionBase::buildoption,
        "Train a local regression model from the K neighbors, weighted by\n"
        "the kernel evaluations.  This is carried out at each test point.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void KNNRegressor::build_()
{
    if (!knn)
        PLERROR("KNNRegressor::build_: the 'knn' option must be specified");

    if (kmin <= 0)
        PLERROR("KNNRegressor::build_: the 'kmin' option must be strictly positive");
}

// ### Nothing to add here, simply calls build_
void KNNRegressor::build()
{
    inherited::build();
    build_();
}


void KNNRegressor::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    deepCopyField(knn_output,           copies);
    deepCopyField(knn_costs,            copies);
    deepCopyField(knn,                  copies);
    deepCopyField(kernel,               copies);
    deepCopyField(local_model,          copies);
    inherited::makeDeepCopyFromShallowCopy(copies);
}


int KNNRegressor::outputsize() const
{
    return train_set->targetsize();
}


void KNNRegressor::setTrainingSet(VMat training_set, bool call_forget)
{
    PLASSERT( knn );
    inherited::setTrainingSet(training_set,call_forget);

    // Now we carry out a little bit of tweaking on the embedded knn:
    // - ask to report input+target+weight
    // - set number of neighbors
    // - set training set (which performs a build if necessary)
    int n = training_set.length();
    int num_neighbors = max(kmin, int(kmult*pow(double(n), double(kpow))));
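    // Note that the kmult*n^kpow term above is truncated toward zero by the
    // int cast before taking the max with kmin.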
    knn->num_neighbors = num_neighbors;
    knn->copy_input  = true;
    knn->copy_target = true;
    knn->copy_weight = true;
    knn->copy_index  = false;
    knn->setTrainingSet(training_set,call_forget);
    knn_costs.resize(knn->nTestCosts());
    knn_output.resize(knn->outputsize());
}

void KNNRegressor::forget()
{
    PLASSERT( knn );
    knn->forget();
}

void KNNRegressor::train()
{
    PLASSERT( knn );
    knn->train();
}

void KNNRegressor::computeOutput(const Vec& input, Vec& output) const
{
    PLASSERT( output.size() == outputsize() );

    // Start by computing the nearest neighbors
    Vec knn_targets;
    knn->computeOutputAndCosts(input, knn_targets, knn_output, knn_costs);

    // A little sanity check on the knn costs: make sure that they are not
    // all zero, as this almost certainly indicates a wrong kernel
    bool has_non_zero_costs = false;
    for (int i=0, n=knn_costs.size() ; i<n && !has_non_zero_costs ; ++i)
        has_non_zero_costs = !is_missing(knn_costs[i]) && !is_equal(knn_costs[i], 0.0);
    if (! has_non_zero_costs) {
        string input_str = tostring(input, PStream::pretty_ascii);
        PLWARNING("KNNRegressor::computeOutput: all %d neighbors have zero similarity with\n"
                  "input vector %s;\n"
                  "check the similarity kernel bandwidth.  Replacing them by uniform weights.",
                  knn_costs.size(), input_str.c_str());
        knn_costs.fill(1.0);
    }

    // For each neighbor, the KNN object outputs the following:
    //     1) the input vector
    //     2) the target vector
    //     3) the weight (in all cases)
    // We shall patch the weight of each neighbor (observation) to reflect
    // the effect of the kernel weighting
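    // Memory layout of each neighbor's row within the flat knn_output vector:
    //     [ input (inputsize) | target (outputsize) | weight (1) ]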
    const int inputsize    = input.size();
    const int outputsize   = output.size();
    const int weightoffset = inputsize+outputsize;
    const int rowwidth     = weightoffset+1;
    real* knn_output_data  = knn_output.data();
    real total_weight      = 0.0;
    for (int i=0, n=knn->num_neighbors; i<n; ++i, knn_output_data += rowwidth) {
        real w;
        if (kernel) {
            Vec cur_input(inputsize, knn_output_data);
            w = kernel(cur_input, input);
        }
        else if (use_knn_costs_as_weights)
            w = knn_costs[i];
        else
            w = 1.0;

        if (is_missing(w))
            w = 0.0;

        // Patch the existing weight
        knn_output_data[weightoffset] *= w;
        total_weight += knn_output_data[weightoffset];
    }

    // If total weight is too small, make the output all zeros
    if (total_weight < 1e-6) {
        output.fill(0.0);
        return;
    }

    // Now compute the output per se
    if (! local_model) {
        // If no local model was requested, simply perform a weighted
        // average of the nearest-neighbors
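        // (the Nadaraya-Watson estimate):
        //     yhat(x) = sum_i w_i * y_i  /  sum_j w_j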
        output.fill(0.0);
        knn_output_data = knn_output.data();
        for (int i=0, n=knn->num_neighbors; i<n; ++i, knn_output_data+=rowwidth) {
            Vec cur_output(outputsize, knn_output_data+inputsize);
            multiplyAcc(output, cur_output,
                        knn_output_data[weightoffset] / total_weight);
        }
    }
    else {
        // Reinterpret knn_output as a training set and use local model
        Mat training_data = knn_output.toMat(knn->num_neighbors, rowwidth);
        VMat training_set(training_data);
        training_set->defineSizes(inputsize, outputsize, 1 /* weightsize */);
        local_model->setTrainingSet(training_set, true /* forget */);
        local_model->setTrainStatsCollector(new VecStatsCollector());
        local_model->train();
        local_model->computeOutput(input,output);
    }
}

void KNNRegressor::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{
    PLASSERT( costs.size() == 1 );
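    // powdistance(output, target, 2) computes sum_i (output[i]-target[i])^2,
    // matching the 'mse' cost documented in the class help above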
    costs[0] = powdistance(output,target,2);
}

bool KNNRegressor::computeConfidenceFromOutput(const Vec& input, const Vec& output,
                                               real probability,
                                               TVec< pair<real,real> >& intervals) const
{
    if (! local_model)
        return false;

    // Assume that the local model has been trained; don't re-train it
    return local_model->computeConfidenceFromOutput(input, output, probability, intervals);
}


TVec<string> KNNRegressor::getTestCostNames() const
{
    static TVec<string> costs(1);
    costs[0] = "mse";
    return costs;
}

TVec<string> KNNRegressor::getTrainCostNames() const
{
    return TVec<string>();
}
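
// A minimal usage sketch (hypothetical driver code, not part of the original
// file; it assumes a VMat 'data' whose input/target/weight sizes are already
// defined, and public access to the build options, as is conventional for
// PLearn learners):
//
//     PP<KNNRegressor> reg = new KNNRegressor();
//     reg->kmin = 10;                      // use at least 10 neighbors
//     reg->build();
//     reg->setTrainingSet(data, true);     // also configures the knn finder
//     reg->train();                        // trains the embedded knn
//     Vec output(reg->outputsize());
//     reg->computeOutput(input, output);   // Nadaraya-Watson prediction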


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :