// -*- C++ -*-

// NGramDistribution.cc
//
// Copyright (C) 2004 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: NGramDistribution.cc 8173 2007-10-10 22:43:12Z larocheh $
 ******************************************************* */

// Authors: Hugo Larochelle

#include "NGramDistribution.h"
#include <plearn/vmat/FractionSplitter.h>
#include <plearn/vmat/RepeatSplitter.h>

namespace PLearn {
using namespace std;

///////////////////////
// NGramDistribution //
///////////////////////
NGramDistribution::NGramDistribution() :
    nan_replace(false),
    n(2),
    additive_constant(0),
    discount_constant(0.01),
    smoothing("no_smoothing"),
    lambda_estimation("manual")
{
    forget();
    // In an n-gram, the predicted size is always one.
    predicted_size = 1;
    predictor_size = -1;
}

PLEARN_IMPLEMENT_OBJECT(NGramDistribution,
                        "NGram distribution P(w_i|w_{i-n+1}^{i-1})",
                        "Takes a sequence of contexts of symbols (integers)\n"
                        "and computes an n-gram language model. Several\n"
                        "smoothing techniques are offered."
    );
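
// The smoothing methods implemented in density() below compute, for an
// ngram w_{i-n+1}^i with context h, counts c(.), context totals N(.) and
// vocabulary size |V| (voc_size):
//
//   no_smoothing:          P(w_i|h) = c(h,w_i)/N(h), or 1/|V| if N(h) == 0
//   add-delta:             P(w_i|h) = (c(h,w_i) + delta)/(N(h) + delta*|V|)
//                          with delta = additive_constant
//   jelinek-mercer:        a convex combination sum_j lambda_j P_j(w_i|h_j)
//                          of the lower-order estimates and the uniform
//                          1/|V|, renormalized over the orders actually
//                          observed in training
//   absolute-discounting:  each nonzero count is discounted by
//                          discount_constant and the freed probability mass
//                          is passed to the next lower order, recursively
//                          down to the uniform 1/|V|
//   witten-bell:           P(w_i|h) = (c(h,w_i) + n1+(h)*P_lower(w_i)) /
//                          (N(h) + n1+(h)), where n1+(h) is the number of
//                          distinct symbols observed after context h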

////////////////////
// declareOptions //
////////////////////
void NGramDistribution::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    declareOption(ol, "nan_replace", &NGramDistribution::nan_replace,
                  OptionBase::buildoption,
                  "Indication that the missing values in context (nan) should be\n"
                  "replaced by a default value (-1). nan fields should correspond\n"
                  "to context not accessible (like in the beginning of a sentence).\n"
                  "If this parameter is false, then the shortest ngram is inserted\n"
                  "in the NGramTree."
        );

    declareOption(ol, "n", &NGramDistribution::n, OptionBase::buildoption,
        "Length of the n-gram (this option overrides the inherited options\n"
        "'predictor_size' and 'predicted_size', i.e. predictor_size = n-1\n"
        "and predicted_size = 1.");

    declareOption(ol, "additive_constant", &NGramDistribution::additive_constant,
                  OptionBase::buildoption,
                  "Additive constant for add-delta smoothing");

    declareOption(ol, "discount_constant", &NGramDistribution::discount_constant,
                  OptionBase::buildoption,
                  "Discount constant for absolute discounting smoothing");

    declareOption(ol, "smoothing", &NGramDistribution::smoothing,
                  OptionBase::buildoption,
                  "Smoothing method. Choose among:\n"
                  "- \"no_smoothing\"\n"
                  "- \"add-delta\"\n"
                  "- \"jelinek-mercer\"\n"
                  "- \"witten-bell\"\n"
                  "- \"absolute-discounting\"\n"
        );

    declareOption(ol, "lambda_estimation", &NGramDistribution::lambda_estimation,
                  OptionBase::buildoption,
                  "Lambda estimation method. Choose among:\n"
                  "- \"manual\" (the 'lambdas' field should be specified)\n"
                  "- \"EM\"\n"
        );

    declareOption(ol, "lambdas", &NGramDistribution::lambdas,
                  OptionBase::buildoption,
                  "Lambdas of the interpolated ngram");

    declareOption(ol, "validation_set", &NGramDistribution::validation_set,
                  OptionBase::buildoption,
                  "Validation set used to estimate the lambdas with the\n"
                  "EM algorithm.");

    declareOption(ol, "tree", &NGramDistribution::tree, OptionBase::learntoption,
                  "NGramTree of the frequencies");

    declareOption(ol, "voc_size", &NGramDistribution::voc_size,
                  OptionBase::learntoption,
                  "Vocabulary size");

    // Now call the parent class' declareOptions().
    inherited::declareOptions(ol);

    redeclareOption(ol, "predictor_size", &NGramDistribution::predictor_size,
                  OptionBase::nosave,
                  "Defined at build time.");

    redeclareOption(ol, "predicted_size", &NGramDistribution::predicted_size,
                  OptionBase::nosave,
                  "Defined at build time.");
}
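
// A hypothetical sketch (not part of this file) of how these options might
// be set; the option names match the declarations above, but the exact
// PLearn script syntax is assumed here:
//
//   NGramDistribution(
//       n = 3;
//       smoothing = "jelinek-mercer";
//       lambda_estimation = "EM";
//       validation_set = ...  # some previously defined VMat
//   )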

///////////
// build //
///////////
void NGramDistribution::build()
{
    // 'predictor_size' is set to -1 in the constructor; it is defined here
    // from the value of 'n' (an n-gram has n-1 predictor symbols).
    predictor_size = n - 1;
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void NGramDistribution::build_()
{
    if(train_set)
    {
        if(inputsize() != n) PLERROR("In NGramDistribution:build_() : input size should be n=%d", n);
        Vec values;
        train_set->getValues(0,n-1,values);
        voc_size = values.length();
        if(voc_size <= 0) PLERROR("In NGramDistribution:build_() : vocabulary size is <= 0");

        if(nan_replace) voc_size++;

        if(smoothing == "absolute-discounting")
        {
            if(discount_constant < 0 || discount_constant > 1)
                PLERROR("In NGramDistribution:build_() : discount constant should be in [0,1]");
        }
    }
}

/////////
// cdf //
/////////
real NGramDistribution::cdf(const Vec& y) const
{
    PLERROR("cdf not implemented for NGramDistribution"); return 0;
}

/////////////////
// expectation //
/////////////////
void NGramDistribution::expectation(Vec& mu) const
{
    PLERROR("expectation not implemented for NGramDistribution");
}

////////////
// forget //
////////////
void NGramDistribution::forget()
{
    tree = new NGramTree();
}

//////////////
// generate //
//////////////
void NGramDistribution::generate(Vec& y) const
{
    PLERROR("generate not implemented for NGramDistribution");
}

/////////////////
// log_density //
/////////////////
real NGramDistribution::log_density(const Vec& y) const
{
    return safeflog(density(y));
}

real NGramDistribution::density(const Vec& y) const
{
    if(is_missing(y[0])) PLERROR("In NGramDistribution:density() : y[0] is missing");

    // Make the ngram (w_{i-n+1}, ..., w_{i-1}, w_i) from the predictor part
    // and the predicted symbol y[0].
    static TVec<int> ngram;

    Vec row(n);
    row[n-1] = y[0];
    for(int i=0; i<n-1; i++)
        row[i] = predictor_part[i];

    getNGrams(row,ngram);

    // Compute P(w_i|w_{i-n+1}^{i-1})

    TVec<int> freq;
    TVec<int> normalization;
    int ngram_length = ngram.length();

    if(smoothing == "no_smoothing")
    {
        // Maximum likelihood estimate; fall back to the uniform
        // distribution when the context was never observed.
        freq = tree->freq(ngram);
        normalization = tree->normalization(ngram);
        if(normalization[ngram_length-1] == 0)
            return 1.0/voc_size;
        return ((real)freq[ngram_length-1])/normalization[ngram_length-1];
    }
    else if(smoothing == "add-delta")
    {
        freq = tree->freq(ngram);
        normalization = tree->normalization(ngram);
        return ((real)freq[ngram_length-1] + additive_constant)/(normalization[ngram_length-1] + additive_constant*voc_size);
    }
    else if(smoothing == "jelinek-mercer")
    {
        freq = tree->freq(ngram);
        normalization = tree->normalization(ngram);
        real ret = 1.0/voc_size*lambdas[0];
        real norm = lambdas[0]; // For ngrams smaller than n...

        // Interpolate the observed orders and renormalize over the
        // lambdas that were actually used.
        for(int j=0; j<ngram_length; j++)
        {
            if(normalization[j] != 0)
            {
                ret += lambdas[j+1] * (((real)freq[j])/normalization[j]);
                norm += lambdas[j+1];
            }
        }
        return ret/norm;
    }
    else if(smoothing == "absolute-discounting")
    {
        freq = tree->freq(ngram);
        normalization = tree->normalization(ngram);
        TVec<int> n_freq = tree->n_freq(ngram);
        real ret = 0;
        real factor = 1;
        // Go from the highest to the lowest order, discounting each count
        // by 'discount_constant' and passing the freed mass down.
        for(int j=ngram_length-1; j>=0; j--)
        {
            if(normalization[j] != 0)
            {
                ret += factor * ((real)(freq[j] > discount_constant ? freq[j] - discount_constant : 0))/ normalization[j];
                factor = factor * ((real)discount_constant)/normalization[j] * n_freq[j];
            }
        }
        // The remaining mass goes to the uniform distribution.
        ret += factor * 1.0/voc_size;

        return ret;
    }
    else if(smoothing == "witten-bell")
    {
        freq = tree->freq(ngram);
        normalization = tree->normalization(ngram);
        TVec<int> n_freq = tree->n_freq(ngram);
        // Recursion from the uniform distribution up through the orders.
        real ret = 1.0/voc_size;
        for(int j=0; j<ngram_length; j++)
        {
            if(normalization[j] != 0)
                ret = (freq[j]+n_freq[j]*ret)/(normalization[j]+n_freq[j]);
        }

        return ret;
    }
    else PLERROR("In NGramDistribution:density() : smoothing technique not valid");
    return 0;
}
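
// A small worked example of the branches above, assuming a bigram model
// (n=2) with voc_size = 100 and counts freq = 3, normalization = 10 for
// the full context:
//   no_smoothing:  P = 3/10 = 0.3 (or 1/100 = 0.01 for an unseen context)
//   add-delta (additive_constant = 0.5):
//                  P = (3 + 0.5)/(10 + 0.5*100) = 3.5/60 ~ 0.0583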

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void NGramDistribution::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(lambdas, copies);
    deepCopyField(tree, copies);
}

/////////////////
// survival_fn //
/////////////////
real NGramDistribution::survival_fn(const Vec& y) const
{
    PLERROR("survival_fn not implemented for NGramDistribution"); return 0;
}

//////////////
// variance //
//////////////
void NGramDistribution::variance(Mat& covar) const
{
    PLERROR("variance not implemented for NGramDistribution");
}

void NGramDistribution::getNGrams(Vec row, TVec<int>& ngram) const
{
    if(is_missing(row[row.length()-1])) PLERROR("In getNGrams() : last element of row is NaN");

    int insert_from = 0;
    // Look for missing values (NaN): unless they are replaced, the ngram
    // starts after the last one.
    if(!nan_replace)
        for(int j=0; j<row.length(); j++)
            if(is_missing(row[j]))
                insert_from = j+1;

    ngram.resize(n-insert_from);

    // Make the ngram
    for(int j=insert_from; j<row.length(); j++)
    {
        if(is_missing(row[j]))
            ngram[j-insert_from] = -1;
        else
            ngram[j-insert_from] = (int)row[j];
    }
}
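
// Example: for n = 3 and row = (NaN, 4, 7),
// - with nan_replace = false, insert_from skips past the missing value and
//   the shortened ngram is (4, 7);
// - with nan_replace = true, the NaN search above is skipped and missing
//   values are encoded as -1, giving the full ngram (-1, 4, 7).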

void NGramDistribution::train()
{
//    if(smoothing == "jelinek-mercer" && lambda_estimation == "EM")
//    {
//        if(validation_proportion <= 0 || validation_proportion >= 1)
//            PLERROR("In NGramDistribution:build_() : validation_proportion should be in (0,1)");
//        // Making FractionSplitter
//        PP<FractionSplitter> fsplit = new FractionSplitter();
//        TMat<pair<real,real> > splits(1,2);
//        splits(0,0).first = 0; splits(0,0).second = 1-validation_proportion;
//        splits(0,1).first = 1-validation_proportion; splits(0,1).second = 1;
//        fsplit->splits = splits;
//        fsplit->build();
//
//        // Making RepeatSplitter
//        PP<RepeatSplitter> rsplit = new RepeatSplitter();
//        rsplit->n = 1;
//        rsplit->shuffle = true;
//        rsplit->seed = 123456;
//        rsplit->to_repeat = fsplit;
//        rsplit->setDataSet(train_set);
//        rsplit->build();
//
//        TVec<VMat> vmat_splits = rsplit->getSplit();
//        contexts_train = vmat_splits[0];
//        contexts_validation = vmat_splits[1];
//    }
//    else

    // Putting ngrams in the tree
    Vec row(n);
    TVec<int> int_row(n);

    if(stage == 0 && nstages > 0)
    {
        PP<ProgressBar> pb = new ProgressBar("Inserting ngrams in NGramTree", train_set->length());
        for(int i=0; i<train_set->length(); i++)
        {
            train_set->getRow(i,row);
            getNGrams(row,int_row);
            tree->add(int_row);

            pb->update(i+1);
        }
        stage++;
        if(smoothing == "jelinek-mercer" && lambda_estimation == "EM")
            stage--; // Will be incremented in the EM estimation
    }


    // Smoothing techniques parameter estimation
    if(smoothing == "jelinek-mercer")
    {
        // Jelinek-Mercer: EM estimation of lambdas
        if(lambda_estimation == "EM")
        {
            if(stage == 0)
            {
                lambdas.resize(n+1); lambdas.fill(1.0/(n+1));
            }
            if(!validation_set) PLERROR("In NGramDistribution:train() : "
                                        "validation_set needs to be provided");
            real diff = EM_PRECISION+1;
            real l_old = 0, l_new = -REAL_MAX;
            Vec e(n+1);
            Vec p(n+1);
            TVec<int> ngram(n);
            real p_sum = 0;
            int n_ngram = 0;
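            // EM iteration implemented below: the E step accumulates, for
            // each validation ngram, the posterior weight of each mixture
            // component, e[j] += p[j]/sum_k p[k] with
            // p[j] = lambdas[j] * P_j(w|h); the M step then re-estimates
            // lambdas[j] = e[j]/n_ngram. l_new tracks the validation
            // log-likelihood, so diff = l_new - l_old should be >= 0.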
            //while(diff > EM_PRECISION)
            while(stage < nstages)
            {
                if(verbosity > 2)
                    cout << "EM diff: " << diff << endl;
                n_ngram = 0;
                l_old = l_new; l_new = 0;

                // E step

                e.fill(0);
                //for(int t=0; t<contexts_validation->length(); t++)
                for(int t=0; t<validation_set->length(); t++)
                {
                    p_sum = 0;

                    // get w_{t-n+1}^t

                    //contexts_validation->getRow(t,row);
                    validation_set->getRow(t,row);
                    getNGrams(row,ngram);

                    TVec<int> freq = tree->freq(ngram);
                    TVec<int> normalization = tree->normalization(ngram);
                    if(normalization[ngram.length()-1] != 0)
                    {
                        n_ngram++;
                        p.fill(0);
                        p[0] = lambdas[0]*1.0/voc_size;
                        p_sum += p[0];
                        for(int j=0; j<ngram.length(); j++)
                        {
                            p[j+1] = lambdas[j+1]*(((real)freq[j])/normalization[j]);
                            p_sum += p[j+1];
                        }

                        for(int j=0; j<e.length(); j++)
                            e[j] += p[j]/p_sum;
                        l_new += safeflog(p_sum);
                    }
                }
                if(n_ngram == 0) PLERROR("In NGramDistribution:train() : no ngram in validation set");
                // M step
                for(int j=0; j<lambdas.length(); j++)
                    lambdas[j] = e[j]/n_ngram;

                diff = l_new-l_old;
                stage++;
            }

            // Sanity check: the lambdas should sum to one after EM.
            real temp = 0;
            for(int j=0; j<lambdas.length(); j++)
                temp += lambdas[j];
            if(abs(temp-1) > THIS_PRECISION)
                PLERROR("Oops, lambdas don't sum to one after EM!");
        }
        else if(lambda_estimation == "manual")
        {
            if(lambdas.length() != n+1) PLERROR("In NGramDistribution:train() : lambdas' length should be %d, not %d", n+1, lambdas.length());
            real sum = 0;
            for(int j=0; j<lambdas.length(); j++)
            {
                if(lambdas[j]<0) PLERROR("In NGramDistribution:train() : all lambdas should be non-negative");
                sum += lambdas[j];
            }
            if(abs(sum) < THIS_PRECISION)
                lambdas.fill(1.0/(n+1));
            else
                lambdas *= 1.0/sum;
        }
        else PLERROR("In NGramDistribution:train() : lambda estimation not valid");
    }
}
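
// A minimal usage sketch (illustrative only): it assumes the standard
// PLearner/PDistribution interface (setTrainingSet(), train(),
// setPredictor()) and a hypothetical VMat 'trigrams' whose rows hold
// n = 3 consecutive symbols:
//
//   PP<NGramDistribution> ngram = new NGramDistribution();
//   ngram->n = 3;
//   ngram->smoothing = "witten-bell";
//   ngram->build();
//   ngram->setTrainingSet(trigrams);
//   ngram->train();
//   Vec context(2), word(1);              // (w_{i-2}, w_{i-1}) and w_i
//   context[0] = 4; context[1] = 7; word[0] = 2;
//   ngram->setPredictor(context);
//   real logp = ngram->log_density(word); // log P(w_i = 2 | 4, 7)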

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :