// -*- C++ -*-

// FeatureSetNaiveBayesClassifier.cc
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org



#include "FeatureSetNaiveBayesClassifier.h"
#include <plearn/vmat/SubVMatrix.h>

namespace PLearn {
using namespace std;
PLEARN_IMPLEMENT_OBJECT(FeatureSetNaiveBayesClassifier, "Naive Bayes classifier on a feature set space.", 
                        "This classifier is based on the estimation of\n"
                        "P(y|x), where y is a class and x is the input.\n"
                        "In this naive Bayes model, we have:\n"
                        "  P(y|x) \\propto P(y) \\prod_{j,i} P(f_i(x_j)|y)\n"
                        "where f_i(x_j) is the ith feature of the jth\n"
                        "component of x. P(y) and P(f_i(x_j)|y) are\n"
                        "estimated by maximum likelihood, but an additive\n"
                        "smoothing constant for the observation counts\n"
                        "can be specified. Feature sets must also be\n"
                        "provided.");
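
// Editor's note (a sketch of the math, not original PLearn documentation):
// in the log domain, the add-delta smoothed score that getProbs() computes
// below is
//
//   log score(y) = log N(y)
//                + sum_{j,i} [ log(N(f_i(x_j), y) + delta)
//                            - log(N(., y) + delta * F) ]
//
// where N(f, y) is the feature-class pair count (feature_class_counts),
// N(., y) its sum over features (sum_feature_class_counts),
// F = total_feats_per_token, and delta = smoothing_constant. The class with
// the largest score is returned as the prediction.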

FeatureSetNaiveBayesClassifier::FeatureSetNaiveBayesClassifier() // DEFAULT VALUES FOR ALL OPTIONS
    :
rgen(new PRandom()),
possible_targets_vary(0),
input_dependent_posterior_estimation(0),
smoothing_constant(0)
{}

FeatureSetNaiveBayesClassifier::~FeatureSetNaiveBayesClassifier()
{
}

void FeatureSetNaiveBayesClassifier::declareOptions(OptionList& ol)
{
    declareOption(ol, "possible_targets_vary", &FeatureSetNaiveBayesClassifier::possible_targets_vary, 
                  OptionBase::buildoption, 
                  "Indication that the set of possible targets varies from\n"
                  "one input vector to another.\n");

    declareOption(ol, "feat_sets", &FeatureSetNaiveBayesClassifier::feat_sets, 
                  OptionBase::buildoption, 
                  "FeatureSets to apply to the input.\n");

    declareOption(ol, "input_dependent_posterior_estimation", &FeatureSetNaiveBayesClassifier::input_dependent_posterior_estimation, 
                  OptionBase::buildoption, 
                  "Indication that separate estimates of\n"
                  "the posterior probability of a feature given a class\n"
                  "should be used for different input positions.\n");
    
    declareOption(ol, "smoothing_constant", &FeatureSetNaiveBayesClassifier::smoothing_constant, 
                  OptionBase::buildoption, 
                  "Add-delta smoothing constant.\n");
    
    declareOption(ol, "feature_class_counts", &FeatureSetNaiveBayesClassifier::feature_class_counts, 
                  OptionBase::learntoption, 
                  "Feature-class pair counts.\n");
    
    declareOption(ol, "sum_feature_class_counts", &FeatureSetNaiveBayesClassifier::sum_feature_class_counts, 
                  OptionBase::learntoption, 
                  "Sums of feature-class pair counts, over features.\n");
    
    declareOption(ol, "class_counts", &FeatureSetNaiveBayesClassifier::class_counts, 
                  OptionBase::learntoption, 
                  "Class counts.\n");
    
    inherited::declareOptions(ol);

}
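
// Usage sketch (editor's illustration, not code from this file; assumes the
// generic PLearn Object::setOption / PLearner training interface, and
// my_feature_sets / train_vmat are hypothetical placeholders):
//
//   PP<FeatureSetNaiveBayesClassifier> nb = new FeatureSetNaiveBayesClassifier();
//   nb->setOption("smoothing_constant", "0.5");   // add-delta smoothing
//   nb->setOption("possible_targets_vary", "1");
//   nb->feat_sets = my_feature_sets;   // one FeatureSet per group of input
//                                      // components (build option)
//   nb->setTrainingSet(train_vmat);    // VMat with inputsize tokens + 1 target
//   nb->build();
//   nb->train();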

///////////
// build //
///////////
void FeatureSetNaiveBayesClassifier::build()
{
    inherited::build();
    build_();
}


////////////
// build_ //
////////////
void FeatureSetNaiveBayesClassifier::build_()
{
    // Don't do anything if we don't have a train_set:
    // it's the only one that knows the inputsize, targetsize and weightsize.

    if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
    {
        if(targetsize_ != 1)
            PLERROR("In FeatureSetNaiveBayesClassifier::build_(): targetsize_ must be 1, not %d",targetsize_);

        if(weightsize_ > 0)
            PLERROR("In FeatureSetNaiveBayesClassifier::build_(): weightsize_ > 0 is not supported");

        n_feat_sets = feat_sets.length();
        if(n_feat_sets == 0)
            PLERROR("In FeatureSetNaiveBayesClassifier::build_(): at least one FeatureSet must be provided\n");

        if(inputsize_ % n_feat_sets != 0)
            PLERROR("In FeatureSetNaiveBayesClassifier::build_(): feat_sets.length() must be a divisor of inputsize()");

        PP<Dictionary> dict = train_set->getDictionary(inputsize_);
        total_output_size = dict->size();

        total_feats_per_token = 0;
        for(int i=0; i<n_feat_sets; i++)
            total_feats_per_token += feat_sets[i]->size();

        if(stage <= 0)
        {
            if(input_dependent_posterior_estimation)
            {
                feature_class_counts.resize(inputsize_/n_feat_sets);
                sum_feature_class_counts.resize(inputsize_/n_feat_sets);
                for(int i=0; i<feature_class_counts.length(); i++)
                {
                    feature_class_counts[i].resize(total_output_size);
                    sum_feature_class_counts[i].resize(total_output_size);
                    for(int j=0; j<total_output_size; j++)
                    {
                        feature_class_counts[i][j].clear();
                        sum_feature_class_counts[i][j] = 0;
                    }
                }

                class_counts.resize(total_output_size);
                class_counts.fill(0);
            }
            else
            {
                feature_class_counts.resize(1);
                sum_feature_class_counts.resize(1);
                feature_class_counts[0].resize(total_output_size);
                sum_feature_class_counts[0].resize(total_output_size);
                for(int j=0; j<total_output_size; j++)
                {
                    feature_class_counts[0][j].clear();
                    sum_feature_class_counts[0][j] = 0;
                }
                
                class_counts.resize(total_output_size);
                class_counts.fill(0);
            }
        }
                
        output_comp.resize(total_output_size);
        row.resize(train_set->width());
        row.fill(MISSING_VALUE);
        feats.resize(inputsize_);
        // Make sure that all feats[i] have non-null storage...
        for(int i=0; i<feats.length(); i++)
        {
            feats[i].resize(1);
            feats[i].resize(0);
        }
        val_string_reference_set = train_set;
        target_values_reference_set = train_set;

        if (seed_>=0)
            rgen->manual_seed(seed_);
    }
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void FeatureSetNaiveBayesClassifier::computeCostsFromOutputs(const Vec& inputv, const Vec& outputv, 
                                   const Vec& targetv, Vec& costsv) const
{
    PLERROR("In FeatureSetNaiveBayesClassifier::computeCostsFromOutputs(): output is not enough to compute costs");
}

int FeatureSetNaiveBayesClassifier::my_argmax(const Vec& vec, int default_compare) const
{
#ifdef BOUNDCHECK
    if(vec.length()==0)
        PLERROR("In FeatureSetNaiveBayesClassifier::my_argmax(): vec has zero length");
#endif
    real* v = vec.data();
    int indexmax = default_compare;
    real maxval = v[default_compare];
    for(int i=0; i<vec.length(); i++)
        if(v[i]>maxval)
        {
            maxval = v[i];
            indexmax = i;
        }
    return indexmax;
}


///////////////////
// computeOutput //
///////////////////
void FeatureSetNaiveBayesClassifier::computeOutput(const Vec& inputv, Vec& outputv) const
{
    getProbs(inputv,output_comp);
    if(possible_targets_vary)
        outputv[0] = target_values[my_argmax(output_comp,rgen->uniform_multinomial_sample(output_comp.length()))];
    else
        outputv[0] = argmax(output_comp);
}
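
// Note (added comment): my_argmax() is seeded with a uniformly drawn index,
// so when all class scores are equal (e.g. before any counts have been
// accumulated) the randomly drawn default index is returned, avoiding a
// systematic bias toward the first possible target.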

///////////////////////////
// computeOutputAndCosts //
///////////////////////////
void FeatureSetNaiveBayesClassifier::computeOutputAndCosts(const Vec& inputv, const Vec& targetv, 
                                 Vec& outputv, Vec& costsv) const
{
    getProbs(inputv,output_comp);
    if(possible_targets_vary)
        outputv[0] = target_values[my_argmax(output_comp,rgen->uniform_multinomial_sample(output_comp.length()))];
    else
        outputv[0] = argmax(output_comp);
    costsv[0] = (outputv[0] == targetv[0] ? 0 : 1);
}

////////////
// forget //
////////////
void FeatureSetNaiveBayesClassifier::forget()
{
    stage = 0;
    if (train_set) build();
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> FeatureSetNaiveBayesClassifier::getTrainCostNames() const
{
    TVec<string> ret(1);
    ret[0] = "class_error";
    return ret;
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> FeatureSetNaiveBayesClassifier::getTestCostNames() const
{ 
    return getTrainCostNames();
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void FeatureSetNaiveBayesClassifier::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // Private variables
    deepCopyField(target_values,copies);
    deepCopyField(output_comp,copies);
    deepCopyField(row,copies);
    deepCopyField(feats,copies);

    // Protected variables
    deepCopyField(val_string_reference_set,copies);
    deepCopyField(target_values_reference_set,copies);
    deepCopyField(rgen,copies);

    // Public variables
    deepCopyField(feature_class_counts,copies);
    deepCopyField(sum_feature_class_counts,copies);
    deepCopyField(class_counts,copies);

    // Public build options
    deepCopyField(feat_sets,copies);
}

////////////////
// outputsize //
////////////////
int FeatureSetNaiveBayesClassifier::outputsize() const {
    // The output is a single predicted class index, hence targetsize_ (== 1).
    return targetsize_;
}

///////////
// train //
///////////
void FeatureSetNaiveBayesClassifier::train()
{
    if(!train_set)
        PLERROR("In FeatureSetNaiveBayesClassifier::train(): you did not setTrainingSet");

    //if(!train_stats)
    //    PLERROR("In FeatureSetNaiveBayesClassifier::train(): you did not setTrainStatsCollector");
 
    Vec outputv(outputsize());
    Vec costsv(getTrainCostNames().length());
    Vec inputv(train_set->inputsize());
    Vec targetv(train_set->targetsize());
    real sample_weight=1;
    int l = train_set->length();  

    if(stage == 0)
    {
        PP<ProgressBar> pb;
        if(report_progress)
            pb = new ProgressBar("Training " + classname() 
                                 + " from stage 0 to " + tostring(l), l);
        int id = 0;
        for(int t=0; t<l;t++)
        {
            train_set->getExample(t,inputv,targetv,sample_weight);

            // Get possible target values
            if(possible_targets_vary) 
            {
                row.subVec(0,inputsize_) << inputv;
                train_set->getValues(row,inputsize_,
                                     target_values);
                output_comp.resize(target_values.length());
            }
            
            // Get features
            nfeats = 0;
            for(int i=0; i<inputsize_; i++)
            {
                str = train_set->getValString(i,inputv[i]);
                feat_sets[i%n_feat_sets]->getFeatures(str,feats[i]);
                nfeats += feats[i].length();
            }

            // Update the feature-class pair counts and their sums
            for(int i=0; i<inputsize_; i++)
            {
                for(int j=0; j<feats[i].length(); j++)
                {
                    if(input_dependent_posterior_estimation)
                        id = i/n_feat_sets;
                    else
                        id = 0;

                    if(feature_class_counts[id][(int)targetv[0]].find(feats[i][j]) == feature_class_counts[id][(int)targetv[0]].end())
                        feature_class_counts[id][(int)targetv[0]][feats[i][j]] = 1;
                    else
                        feature_class_counts[id][(int)targetv[0]][feats[i][j]] += 1;

                    sum_feature_class_counts[id][(int)targetv[0]] += 1;
                }
            }

            class_counts[(int)targetv[0]] += 1;

            computeOutputAndCosts(inputv, targetv, outputv, costsv);
            train_stats->update(costsv);

            if(pb) pb->update(t);
        }
        stage = 1;
        train_stats->finalize();
        if(verbosity>1)
            cout << "Epoch " << stage << " train objective: " 
                 << train_stats->getMean() << endl;
    }
}
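
// Worked example (added note, hypothetical numbers): suppose
// smoothing_constant = 0.5, total_feats_per_token = 3, and after training
// class y has sum_feature_class_counts[0][y] = 10 with a given feature f
// observed 4 times. The smoothed estimate used by getProbs() is then
// P(f|y) = (4 + 0.5) / (10 + 0.5*3) = 4.5/11.5 ~= 0.39, while an unseen
// feature gets 0.5/11.5 ~= 0.04 instead of a probability of zero.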

void FeatureSetNaiveBayesClassifier::getProbs(const Vec& inputv, Vec& outputv) const
{
    // Get possible target values
    if(possible_targets_vary) 
    {
        row.subVec(0,inputsize_) << inputv;
        target_values_reference_set->getValues(row,inputsize_,
                                               target_values);
        outputv.resize(target_values.length());
    }
    
    // Get features
    nfeats = 0;
    for(int i=0; i<inputsize_; i++)
    {
        str = val_string_reference_set->getValString(i,inputv[i]);
        feat_sets[i%n_feat_sets]->getFeatures(str,feats[i]);
        nfeats += feats[i].length();
    }
    int id=0;
    
    if(possible_targets_vary)
    {
        for(int i=0; i<target_values.length(); i++)
        {
            outputv[i] = safeflog(class_counts[(int)target_values[i]]);
            for(int k=0; k<inputsize_; k++)
            {
                if(input_dependent_posterior_estimation)
                    id = k/n_feat_sets;
                else
                    id = 0;
                
                for(int j=0; j<feats[k].length(); j++)
                {
                    outputv[i] -= safeflog(sum_feature_class_counts[id][(int)target_values[i]] + smoothing_constant*total_feats_per_token);
                    if(feature_class_counts[id][(int)target_values[i]].find(feats[k][j]) == feature_class_counts[id][(int)target_values[i]].end())
                        outputv[i] += safeflog(smoothing_constant);
                    else
                        outputv[i] += safeflog(feature_class_counts[id][(int)target_values[i]][feats[k][j]]+smoothing_constant);
                }
            }
        }
    }
    else
    {
        for(int i=0; i<total_output_size; i++)
        {
            outputv[i] = safeflog(class_counts[i]);
            for(int k=0; k<inputsize_; k++)
            {
                if(input_dependent_posterior_estimation)
                    id = k/n_feat_sets;
                else
                    id = 0;
                
                for(int j=0; j<feats[k].length(); j++)
                {
                    outputv[i] -= safeflog(sum_feature_class_counts[id][i] + smoothing_constant*total_feats_per_token);
                    if(feature_class_counts[id][i].find(feats[k][j]) == feature_class_counts[id][i].end())
                        outputv[i] += safeflog(smoothing_constant);
                    else
                        outputv[i] += safeflog(feature_class_counts[id][i][feats[k][j]]+smoothing_constant);
                }
            }
        }
    }
}
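
// Note (added comment): despite its name, getProbs() fills outputv with
// *unnormalized log-scores* log P(y) + sum log P(f|y), not probabilities;
// only their argmax is used by computeOutput() and computeOutputAndCosts().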

// The following methods temporarily redirect the reference sets (used to map
// values to strings and to enumerate possible targets) to the dataset being
// processed, then restore them to train_set.
void FeatureSetNaiveBayesClassifier::batchComputeOutputAndConfidence(VMat inputs, real probability,
                                         VMat outputs_and_confidence) const
{
    val_string_reference_set = inputs;
    inherited::batchComputeOutputAndConfidence(inputs,probability,outputs_and_confidence);
    val_string_reference_set = train_set;
}

void FeatureSetNaiveBayesClassifier::use(VMat testset, VMat outputs) const
{
    val_string_reference_set = testset;
    // Only use testset for target values if it contains the target part
    if(testset->width() > train_set->inputsize())
        target_values_reference_set = testset;
    inherited::use(testset,outputs);
    val_string_reference_set = train_set;
    if(testset->width() > train_set->inputsize())
        target_values_reference_set = train_set;
}

void FeatureSetNaiveBayesClassifier::test(VMat testset, PP<VecStatsCollector> test_stats, 
                      VMat testoutputs, VMat testcosts) const
{
    val_string_reference_set = testset;
    target_values_reference_set = testset;
    inherited::test(testset,test_stats,testoutputs,testcosts);
    val_string_reference_set = train_set;
    target_values_reference_set = train_set;
}

VMat FeatureSetNaiveBayesClassifier::processDataSet(VMat dataset) const
{
    VMat ret;
    val_string_reference_set = dataset;
    // Assumes it contains the target part information
    if(dataset->width() > train_set->inputsize())
        target_values_reference_set = dataset;
    ret = inherited::processDataSet(dataset);
    val_string_reference_set = train_set;
    if(dataset->width() > train_set->inputsize())
        target_values_reference_set = train_set;
    return ret;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :