PLearn 0.1
// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
//
// Copyright (C) 2001,2002 Pascal Vincent
// Copyright (C) 2005 University of Montreal
// Copyright (C) 2007 Xavier Saint-Mleux, ApSTAT Technologies inc.
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: StatsCollector.cc 9774 2008-12-11 21:06:26Z nouiz $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "StatsCollector.h"
#include <plearn/base/stringutils.h>
#include "TMat_maths.h"
#include "pl_erf.h"
#include "pl_math.h"
#include <assert.h>
#include <plearn/io/openString.h>
#include <plearn/math/random.h>
#include <plearn/base/RemoteDeclareMethod.h>


namespace PLearn {
using namespace std;

static const real SQRT_ABSOLUTE_TOLERANCE  = sqrt(ABSOLUTE_TOLERANCE);
static const real SQRT2_ABSOLUTE_TOLERANCE = sqrt(SQRT_ABSOLUTE_TOLERANCE);

PLEARN_IMPLEMENT_OBJECT(
    StatsCollector,
    "Collects basic statistics",
    "A StatsCollector allows one to compute basic global statistics for a series\n"
    "of numbers, as well as statistics within automatically determined\n"
    "ranges.\n"
    "The first 'maxnvalues' encountered values will be used as reference\n"
    "points to define the ranges, so to get reasonable results, your\n"
    "sequence should be i.i.d., and NOT sorted!\n"
    "The 'maxnvalues' option also indicates the maximum number of unique\n"
    "values that will be kept in memory. It can be important for computing\n"
    "statistics such as the lift, which require remembering all values for an\n"
    "exact computation. One may set this option to '-1' in order to keep all\n"
    "values automatically (the only limitation being the amount of memory\n"
    "available).\n"
    "\n"
    "The following statistics are available:\n"
    "  - E           - Sample mean\n"
    "  - V           - Sample variance\n"
    "  - STDDEV      - Sample standard deviation\n"
    "  - STDERROR    - Standard error of the sample mean\n"
    "  - SKEW        - Skewness == E(X-mu)^3 / sigma^3\n"
    "  - KURT        - Excess Kurtosis == E(X-mu)^4 / sigma^4 - 3\n"
    "  - MIN         - Minimum value\n"
    "  - MAX         - Maximum value\n"
    "  - AGEMIN      - How many observations ago the min was observed\n"
    "  - AGEMAX      - How many observations ago the max was observed\n"
    "  - RANGE       - The range, i.e. MAX - MIN\n"
    "  - SUM         - Sum of observations\n"
    "  - SUMSQ       - Sum of squares\n"
    "  - FIRST       - First observation\n"
    "  - LAST        - Last observation\n"
    "  - N           - Total number of observations\n"
    "  - NMISSING    - Number of missing observations\n"
    "  - NNONMISSING - Number of non-missing observations\n"
    "  - SHARPERATIO - Mean divided by standard deviation\n"
    "  - EoverSKEW   - Mean divided by skewness\n"
    "  - EoverSKEWms - Mean divided by skewness (special version for model selection; see note below)\n"
    "  - EoverKURT   - Mean divided by kurtosis\n"
    "  - ZSTAT       - Z-statistic of the sample mean estimator\n"
    "  - PZ1t        - One-tailed probability of the Z-Statistic\n"
    "  - PZ2t        - Two-tailed probability of the Z-Statistic\n"
    "  - PSEUDOQ(q)  - Return the location of the pseudo-quantile q, where 0 < q < 1.\n"
    "                  NOTE that bin counting must be enabled, i.e. maxnvalues != 0\n"
    "  - IQR         - The interquartile range, i.e. PSEUDOQ(0.75) - PSEUDOQ(0.25)\n"
    "  - PRR         - The pseudo robust range, i.e. PSEUDOQ(0.99) - PSEUDOQ(0.01)\n"
    "  - LIFT(f)     - Lift computed at fraction f (0 <= f <= 1)\n"
    "  - MEAN_LIFT   - Area under lift curve, normalized by the number of examples\n"
    "  - NIPS_LIFT   - Area under lift curve as computed in NIPS'2004 challenge\n"
    "  - PRBP        - Precision / Recall Breakeven Point = value of precision and recall\n"
    "                  when they both are equal (computed for the positive class)\n"
    "  - DMODE       - Discrete distribution first mode\n"
    "\n"
    "Notes:\n"
    "  - When computing LIFT-related statistics, all values encountered need to be stored\n"
    "    which means that 'maxnvalues' should be set to a high value (or -1). Also, a value\n"
    "    should be positive when the real target is the class of interest (positive example),\n"
    "    and negative otherwise, the magnitude being the estimated likelihood of the example.\n"
    "  - Formulas to compute LIFT-related statistics. Let n+ = number of positive examples,\n"
    "    n = total number of examples, v_i the value assigned to example i, and assume\n"
    "    examples are sorted by decreasing order of magnitude |v_i|:\n"
    "      LIFT(f) = sum_{k=1}^{fn} 1_{v_k > 0} / (f * n+)\n"
    "      NIPS_LIFT = (A_I - A) / A_I, with\n"
    "        A = sum_{k=1}^n LIFT(k/n) / n\n"
    "        A_I = (n / n+ - 1) / 2 * (n+ / n + 1) + 1\n"
    "    See http://predict.kyb.tuebingen.mpg.de/pages/evaluation.php for details (note\n"
    "    that the formulas on the web site and in the python script are different).\n"
    "  - LIFT(f) actually returns - 100 * LIFT(f), so that lower means better, and it is\n"
    "    scaled by 100, as is common practice.\n"
    "  - MEAN_LIFT actually returns -1 * MEAN_LIFT, so that lower means better.\n"
    "  - The comments about the LIFT also apply to the PRBP statistic.\n"
    "  - The skewness and kurtosis are computed in terms of UNCENTERED ACCUMULATORS,\n"
    "    i.e. sum{(x-a)^n}, where a is the first observation encountered and n is some integer.\n"
    "  - EoverSKEWms is defined as EoverSKEW when both the numerator and denominator\n"
    "    are positive, otherwise it is defined as -|EoverSKEW|, i.e. it is always negative;\n"
    "    the intended purpose of this statistic is to serve as a model selection criterion,\n"
    "    wherein one wants to encourage high E and low positive skewness.\n"
    "  - For the skewness, defined as skewness == E(X-mu)^3 / sigma^3, we compute the top\n"
    "    term as\n"
    "\n"
    "      (x-a)^3+(3(x-a)^2+(a-mu)(3(x-a)+a-mu))(a-mu)\n"
    "\n"
    "  - Likewise for the kurtosis, defined as kurtosis == E(x-mu)^4 / sigma^4 - 3,\n"
    "    (note that this is the EXCESS kurtosis, whose value is 0 for a\n"
    "    normal distribution), we compute the top term as\n"
    "\n"
    "      (x-a)^4+(4(x-a)^3+(6(x-a)^2+(a-mu)(4(x-a)+a-mu))(a-mu))(a-mu)\n"
    "\n"
    "  - (Nicolas thanks God and Wolfram for Mathematica)."
    );


StatsCollector::StatsCollector(int the_maxnvalues)
    : epsilon(0.0),
      maxnvalues(the_maxnvalues),
      no_removal_warnings(false),
      nmissing_(0.),
      nnonmissing_(0.),
      sumsquarew_(0.),
      sum_(0.),
      sumsquare_(0.),
      sumcube_(0.),
      sumfourth_(0.),
      min_(MISSING_VALUE),
      max_(MISSING_VALUE),
      agemin_(MISSING_VALUE),
      agemax_(MISSING_VALUE),
      first_(MISSING_VALUE),
      last_(MISSING_VALUE),
      more_than_maxnvalues(false),
      binary_(-1),
      integer_(-1),
      sorted(false)
{
    build_();
}

int sortIdComparator(const void* i1, const void* i2)
{
    real d = ((PairRealSCCType*)i1)->first - ((PairRealSCCType*)i2)->first;
    return (d<0) ? -1 : (fast_exact_is_equal(d, 0) ? 0 : 1);
}

void StatsCollector::declareOptions(OptionList& ol)
{
    // build options

    declareOption(
        ol, "epsilon", &StatsCollector::epsilon,
        OptionBase::buildoption,
        "Small regularizing value to be added to the variance (V) estimator (and\n"
        "indirectly, to standard deviation (STDDEV)). This permits dividing by\n"
        "the standard deviation to perform a normalization, without fearing a\n"
        "division by zero. 
Forwarded from the option of the same name in\n" 00213 "VecStatsCollector if this StatsCollector belong in one.\n"); 00214 00215 declareOption( 00216 ol, "maxnvalues", &StatsCollector::maxnvalues, 00217 OptionBase::buildoption, 00218 "Maximum number of different values to keep track of in counts.\n" 00219 "If -1, we will keep track of all different values.\n" 00220 "If 0, we will only keep track of global statistics.\n"); 00221 00222 declareOption( 00223 ol, "no_removal_warnings", &StatsCollector::no_removal_warnings, 00224 OptionBase::buildoption, 00225 "If the remove_observation mecanism is used and the removed\n" 00226 "value is equal to one of last_, min_ or max_, the default\n" 00227 "behavior is to warn the user.\n" 00228 "\n" 00229 "If one want to disable this feature, he may set\n" 00230 "no_removal_warnings to true.\n" 00231 "\n" 00232 "Default: false (0)." ); 00233 00234 00235 // learnt options 00236 declareOption( 00237 ol, "nmissing_", &StatsCollector::nmissing_, 00238 OptionBase::learntoption, 00239 "number of missing values"); 00240 00241 declareOption( 00242 ol, "nnonmissing_", &StatsCollector::nnonmissing_, 00243 OptionBase::learntoption, 00244 "number of non missing value "); 00245 00246 declareOption( 00247 ol, "sumsquarew_", &StatsCollector::sumsquarew_, 00248 OptionBase::learntoption, 00249 "sum of square of all weights"); 00250 00251 declareOption( 00252 ol, "sum_", &StatsCollector::sum_, 00253 OptionBase::learntoption, 00254 "sum of all (values-first_observation)"); 00255 00256 declareOption( 00257 ol, "sumsquare_", &StatsCollector::sumsquare_, 00258 OptionBase::learntoption, 00259 "sum of square of all (values-first_observation)"); 00260 00261 declareOption( 00262 ol, "sumcube_", &StatsCollector::sumcube_, 00263 OptionBase::learntoption, 00264 "sum of cube of all (values-first_observation)"); 00265 00266 declareOption( 00267 ol, "sumfourth_", &StatsCollector::sumfourth_, 00268 OptionBase::learntoption, 00269 "sum of fourth power of all (values-first_observation)"); 00270 00271 declareOption( 00272 ol, "min_", &StatsCollector::min_, 00273 OptionBase::learntoption, 00274 "the min"); 00275 00276 declareOption( 00277 ol, "max_", &StatsCollector::max_, 00278 OptionBase::learntoption, 00279 "the max"); 00280 00281 declareOption( 00282 ol, "agmemin_", &StatsCollector::agemin_, 00283 OptionBase::learntoption, 00284 "How many observations ago the min was observed"); 00285 00286 declareOption( 00287 ol, "agemax_", &StatsCollector::agemax_, 00288 OptionBase::learntoption, 00289 "How many observations ago the max was observed"); 00290 00291 declareOption( 00292 ol, "first_", &StatsCollector::first_, 00293 OptionBase::learntoption, 00294 "first encountered observation"); 00295 00296 declareOption( 00297 ol, "last_", &StatsCollector::last_, 00298 OptionBase::learntoption, 00299 "last encountered observation"); 00300 00301 declareOption( 00302 ol, "binary_", &StatsCollector::binary_, 00303 OptionBase::learntoption, 00304 "1(true) if all seen value are binary. 
0(false) otherwise" 00305 "In the case where we would have reloaded and old version" 00306 "we will calculate the result from the data in counts" 00307 "If maxnvalues==0, we are in trouble as we can't recalculate it" 00308 "So binary_==-1 and integer_==-1, but " 00309 "if we do new update, it will contain the result of only the " 00310 " new value if they change it for 0."); 00311 00312 declareOption( 00313 ol, "integer_", &StatsCollector::integer_, 00314 OptionBase::learntoption, 00315 "as binary_, execpt for integer"); 00316 00317 declareOption( 00318 ol, "counts", &StatsCollector::counts, 00319 OptionBase::learntoption, 00320 "Will contain up to 'maxnvalues' values and associated counts, as\n" 00321 "well as a last element which maps FLT_MAX, so that we do not miss\n" 00322 "anything (remains empty if maxnvalues == 0)."); 00323 00324 declareOption( 00325 ol, "count_ids", &StatsCollector::count_ids, 00326 OptionBase::learntoption | OptionBase::nosave, 00327 "Maps an id to a count value."); 00328 00329 declareOption( 00330 ol, "more_than_maxnvalues", &StatsCollector::more_than_maxnvalues, 00331 OptionBase::learntoption, 00332 "Set to 1 when more than 'maxnvalues' are seen. This is to warn the user when computing\n" 00333 "statistics that may be inaccurate when not all values are kept (e.g., LIFT)."); 00334 00335 // Now call the parent class' declareOptions 00336 inherited::declareOptions(ol); 00337 } 00338 00339 void StatsCollector::declareMethods(RemoteMethodMap& rmm) 00340 { 00341 // Insert a backpointer to remote methods; note that this 00342 // different than for declareOptions() 00343 rmm.inherited(inherited::_getRemoteMethodMap_()); 00344 declareMethod( 00345 rmm, "n", &StatsCollector::n, 00346 (BodyDoc("Returns the total number of value seen\n"), 00347 RetDoc ("n"))); 00348 00349 declareMethod( 00350 rmm, "nmissing", &StatsCollector::nmissing, 00351 (BodyDoc("Return the total number of missing value seen\n"), 00352 RetDoc ("nmissing"))); 00353 00354 declareMethod( 00355 rmm, "nnonmissing", &StatsCollector::nnonmissing, 00356 (BodyDoc("Return the total number of non missing value seen\n"), 00357 RetDoc ("nnonmissing"))); 00358 00359 declareMethod( 00360 rmm, "sumsquarew", &StatsCollector::sumsquarew, 00361 (BodyDoc("Return sumsquarew of the seen value\n"), 00362 RetDoc ("sumsquarew"))); 00363 00364 declareMethod( 00365 rmm, "sum", &StatsCollector::sum, 00366 (BodyDoc("Return sum of the seen value\n"), 00367 RetDoc ("sum"))); 00368 00369 declareMethod( 00370 rmm, "sumsquare", &StatsCollector::sumsquare, 00371 (BodyDoc("Return sumsquare of the seen value\n"), 00372 RetDoc ("sumsquare"))); 00373 00374 declareMethod( 00375 rmm, "min", &StatsCollector::min, 00376 (BodyDoc("Return the minimum value seeup to date\n"), 00377 RetDoc ("the minimum"))); 00378 00379 declareMethod( 00380 rmm, "max", &StatsCollector::max, 00381 (BodyDoc("Return the maximum value see up to date\n"), 00382 RetDoc ("the maximum"))); 00383 00384 declareMethod( 00385 rmm, "agemin", &StatsCollector::agemin, 00386 (BodyDoc("Return the agemin value\n"), 00387 RetDoc ("agemin"))); 00388 00389 declareMethod( 00390 rmm, "agemax", &StatsCollector::agemax, 00391 (BodyDoc("Return the agemax value\n"), 00392 RetDoc ("agemax"))); 00393 00394 declareMethod( 00395 rmm, "range", &StatsCollector::range, 00396 (BodyDoc("Return min - max\n"), 00397 RetDoc ("min - max"))); 00398 00399 declareMethod( 00400 rmm, "mean", &StatsCollector::mean, 00401 (BodyDoc("Return mean of the seen value\n"), 00402 RetDoc ("sum/nnonmissing"))); 00403 
00404 declareMethod( 00405 rmm, "variance", &StatsCollector::variance, 00406 (BodyDoc("Return the variance of the seen value\n"), 00407 RetDoc ("variance"))); 00408 00409 declareMethod( 00410 rmm, "stddev", &StatsCollector::stddev, 00411 (BodyDoc("Return stddev of the seen value\n"), 00412 RetDoc ("stddev"))); 00413 00414 declareMethod( 00415 rmm, "skewness", &StatsCollector::skewness, 00416 (BodyDoc("Return skewness of the seen value\n"), 00417 RetDoc ("skewness"))); 00418 00419 declareMethod( 00420 rmm, "kurtosis", &StatsCollector::kurtosis, 00421 (BodyDoc("Return kurtosis of the seen value\n"), 00422 RetDoc ("kurtosis"))); 00423 00424 declareMethod( 00425 rmm, "stderror", &StatsCollector::stderror, 00426 (BodyDoc("Return stderror of the seen value\n"), 00427 RetDoc ("stderror"))); 00428 00429 declareMethod( 00430 rmm, "first_obs", &StatsCollector::first_obs, 00431 (BodyDoc("Return first_obs of the seen value\n"), 00432 RetDoc ("first_obs"))); 00433 00434 declareMethod( 00435 rmm, "last_obs", &StatsCollector::last_obs, 00436 (BodyDoc("Return last_obs of the seen value\n"), 00437 RetDoc ("last_obs"))); 00438 00439 declareMethod( 00440 rmm, "sharperatio", &StatsCollector::sharperatio, 00441 (BodyDoc("Return sharperatio of the seen value\n"), 00442 RetDoc ("sharperatio"))); 00443 00444 declareMethod( 00445 rmm, "mean_over_skewness", &StatsCollector::mean_over_skewness, 00446 (BodyDoc("Return mean_over_skewness of the seen value\n"), 00447 RetDoc ("mean_over_skewness"))); 00448 00449 declareMethod( 00450 rmm, "mean_over_skewness_ms", &StatsCollector::mean_over_skewness_ms, 00451 (BodyDoc("Return mean_over_skewness_ms of the seen value\n"), 00452 RetDoc ("mean_over_skewness_ms"))); 00453 00454 declareMethod( 00455 rmm, "mean_over_kurtosis", &StatsCollector::mean_over_kurtosis, 00456 (BodyDoc("Return mean_over_kurtosis of the seen value\n"), 00457 RetDoc ("mean_over_kurtosis"))); 00458 00459 declareMethod( 00460 rmm, "isbinary", &StatsCollector::isbinary, 00461 (BodyDoc("Return true is all value seen are binary value\n"), 00462 RetDoc ("binary_"))); 00463 00464 declareMethod( 00465 rmm, "isinteger", &StatsCollector::isinteger, 00466 (BodyDoc("Return true is all value seen are integer value\n"), 00467 RetDoc ("integer_"))); 00468 00469 declareMethod( 00470 rmm, "getCount", &StatsCollector::getCount, 00471 (BodyDoc("return the value stored in a StatsCollectorCount: (n, nbellow, sum, sumsquare, id)\n"), 00472 ArgDoc ("v", "The value of the counts to lookup.\n"), 00473 RetDoc ("Vec(n, nbellow, sum, sumsquare, id)"))); 00474 00475 } 00477 // build_ // 00479 void StatsCollector::build_() 00480 { 00481 PLASSERT( maxnvalues == -1 || maxnvalues >= 0 ); 00482 // make sure counts.size==0. If not, the object must have been loaded, and FLT_MAX is an existing key 00483 // but rounded to some precision, and there would be 2 keys approx.= FLT_MAX 00484 if(storeCounts() && counts.size()==0) 00485 counts[FLT_MAX] = StatsCollectorCounts(); 00486 00487 // If no values are kept, then we always see more than 0 values. 
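// (With maxnvalues == 0 nothing is ever stored in 'counts', so any observation
// already exceeds the number of values we can keep track of; the flag is
// therefore set preemptively.)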
00488 if (maxnvalues == 0) 00489 more_than_maxnvalues = true; 00490 00491 // build count_ids 00492 count_ids.clear(); 00493 for(map<real, StatsCollectorCounts>::iterator it= counts.begin(); 00494 it != counts.end(); ++it) 00495 count_ids[it->second.id]= it->first; 00496 00497 //In case we reload an old version 00498 calculate_binary_integer(); 00499 } 00500 00502 // build // 00504 void StatsCollector::build() 00505 { 00506 inherited::build(); 00507 build_(); 00508 } 00509 00511 // forget // 00513 void StatsCollector::forget() 00514 { 00515 nmissing_ = 0.; 00516 nnonmissing_ = 0.; 00517 sumsquarew_ = 0.; 00518 sum_ = 0.; 00519 sumsquare_ = 0.; 00520 sumcube_ = 0.; 00521 sumfourth_ = 0.; 00522 min_ = MISSING_VALUE; 00523 max_ = MISSING_VALUE; 00524 agemin_ = MISSING_VALUE; 00525 agemax_ = MISSING_VALUE; 00526 first_ = last_ = MISSING_VALUE; 00527 binary_ = -1; 00528 integer_ = -1; 00529 more_than_maxnvalues = (maxnvalues == 0); 00530 approximate_counts.clear(); 00531 sorted = false; 00532 counts.clear(); 00533 build_(); 00534 } 00535 00537 // update // 00539 void StatsCollector::update(real val, real weight) 00540 { 00541 if(is_missing(val)) 00542 nmissing_ += weight; 00543 else 00544 { 00545 // Updating with an inf produces a warning for now -- many tests still 00546 // rely on this behavior, although it should be deprecated 00547 if (isinf(val)) 00548 PLWARNING("Updating a StatsCollector with an 'inf'; check for a division by zero"); 00549 00550 //sum_ += val * weight; 00551 //sumsquare_ += val*val * weight; 00552 last_ = val; 00553 if(fast_exact_is_equal(nnonmissing_,0)) { // first value encountered 00554 min_ = max_ = first_ = last_ = val; 00555 agemin_ = 0; 00556 agemax_ = 0; 00557 binary_ = true; 00558 integer_ = true; 00559 } 00560 else if(val<min_) { 00561 min_ = val; 00562 agemin_ = 0; 00563 ++agemax_; 00564 } 00565 else if(val>max_) { 00566 max_ = val; 00567 agemax_ = 0; 00568 ++agemin_; 00569 } 00570 else { 00571 ++agemax_; // works even if they are missing 00572 ++agemin_; 00573 } 00574 nnonmissing_ += weight; 00575 sumsquarew_ += weight * weight; 00576 double sqval = (val-first_)*(val-first_); 00577 sum_ += (val-first_) * weight; 00578 sumsquare_ += sqval * weight; 00579 sumcube_ += sqval*(val-first_) * weight; 00580 sumfourth_ += sqval*sqval * weight; 00581 00582 if(!(fast_exact_is_equal(val,0) ||fast_exact_is_equal(val,1))) 00583 binary_ = false; 00584 if(!fast_exact_is_equal(val,int(round(val)))) 00585 integer_ = false; 00586 00587 if (storeCounts()) 00588 { 00589 // Also remembering statistics inside values ranges. 00590 sorted = false; 00591 map<real,StatsCollectorCounts>::iterator it; 00592 if(maxnvalues == -1 || int(counts.size())<=maxnvalues) 00593 { 00594 // Still remembering new unseen values 00595 it = counts.find(val); 00596 00597 if(it==counts.end()) { 00598 // Create a new entry. 00599 // Note that doing this in a single operation is not recommended. 00600 // Indeed, depending on the compiler, counts.size() may differ by 1 00601 // because the [] operator may be called before or after. That's why 00602 // we explicitly call counts.size() first. 00603 int id = int(counts.size()); 00604 counts[val].id = id; 00605 count_ids[id]= val; 00606 } 00607 00608 counts[val].n += weight; 00609 } 00610 else // We've filled up counts already 00611 { 00612 it = counts.lower_bound(val); 00613 // TODO Should we allow approximate match? Note that it could 00614 // potentially be a bit dangerous... But also maybe necessary 00615 // when reloading a saved StatsCollector. 
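// Summary of the overflow handling below: once 'counts' is full, an unseen
// value is folded into the first stored key above it (possibly FLT_MAX); its
// weight is added to that key's 'nbelow' and its value and squared value to
// that key's 'sum' and 'sumsquare', so range-based statistics such as the
// pseudo-quantiles remain approximately correct.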
00616 if(fast_exact_is_equal(it->first, val)) // found the exact value 00617 it->second.n += weight; 00618 else // found the value just above val (possibly FLT_MAX) 00619 { 00620 more_than_maxnvalues = true; 00621 it->second.nbelow += weight; 00622 it->second.sum += val * weight; 00623 it->second.sumsquare += val*val * weight; 00624 } 00625 } 00626 // Erase the approximate counts if they existed previously (less 00627 // efficient, but easier to code). 00628 if (!approximate_counts.empty()) 00629 approximate_counts.clear(); 00630 } 00631 } 00632 } 00633 00635 // remove_observation // 00637 void StatsCollector::remove_observation(real val, real weight) 00638 { 00639 if(is_missing(val)) 00640 { 00641 nmissing_ -= weight; 00642 PLASSERT( nmissing_ >= 0 ); 00643 } 00644 else 00645 { 00646 sorted = false; 00647 nnonmissing_ -= weight; 00648 sumsquarew_ -= weight * weight; 00649 PLASSERT( nnonmissing_ >= 0 ); 00650 PLASSERT( sumsquarew_ >= 0 ); 00651 00652 if( !no_removal_warnings ) 00653 { 00654 if(fast_exact_is_equal(val, first_)) 00655 PLWARNING( "Removed value is equal to the first value encountered.\n" 00656 "StatsCollector::first() may not be valid anymore." ); 00657 if(fast_exact_is_equal(val, last_)) 00658 PLWARNING( "Removed value is equal to the last value encountered.\n" 00659 "StatsCollector::last() may not be valid anymore." ); 00660 if(fast_exact_is_equal(val, min_)) 00661 PLWARNING( "Removed value is equal to the min value encountered.\n" 00662 "StatsCollector::min() may not be valid anymore." ); 00663 if(fast_exact_is_equal(val, max_)) 00664 PLWARNING( "Removed value is equal to the max value encountered.\n" 00665 "StatsCollector::max() may not be valid anymore." ); 00666 } 00667 00668 double sqval = (val-first_)*(val-first_); 00669 sum_ -= (val-first_) * weight; 00670 sumsquare_ -= sqval * weight; 00671 sumcube_ -= sqval*(val-first_) * weight; 00672 sumfourth_ -= sqval*sqval * weight; 00673 00674 if(fast_exact_is_equal(nnonmissing_, 0)) { 00675 // We removed the last observation. It may be safer to reset 00676 // everything so that numerical approximations do not lead to 00677 // negative values for statistics that should always be 00678 // positive. We don't call forget() since missing values' count 00679 // would be lost... 00680 min_ = max_ = agemin_ = agemax_ = first_ = last_ = MISSING_VALUE; 00681 sum_ = sumsquare_ = sumcube_ = sumfourth_ = sumsquarew_ = 0.0; 00682 } 00683 00684 // assertion is after previous check for nnonmissing_, since the last 00685 // subtraction of sumsquare might have left sumsquare very slightly 00686 // negative due to roundoff errors 00687 if (-SQRT_ABSOLUTE_TOLERANCE < sumsquare_ && sumsquare_ < 0.0) 00688 sumsquare_ = 0.0; 00689 if (-SQRT2_ABSOLUTE_TOLERANCE < sumfourth_ && sumfourth_ < 0.0) 00690 sumfourth_ = 0.0; 00691 if ( sumsquare_ < 0.0 || sumfourth_ < 0.0 ) 00692 { 00693 perr << "this = " << endl << *this << endl << endl; 00694 PLERROR("Improper call to remove_observation " 00695 "sumsquare_ = %g < 0.0 || sumfourth_ = %g < 0.0", sumsquare_, sumfourth_); 00696 } 00697 00698 if(storeCounts()) 00699 { 00700 if ( maxnvalues > 0 ) 00701 PLERROR("The remove observation mechanism is incompatible with " 00702 "maxnvalues > 0."); 00703 00704 // Find the associated count and decrement. Note that I do not 00705 // verify whether the count reaches 0.0. A null count does not have 00706 // any impact on pseudo_quantile() while removing the element from 00707 // the map could mess up with ids... 
00708 counts[val].n -= weight; 00709 } 00710 } 00711 } 00712 00714 // getApproximateCounts // 00716 map<real, StatsCollectorCounts>* StatsCollector::getApproximateCounts() 00717 { 00718 if (!approximate_counts.empty()) 00719 return &approximate_counts; 00720 map<real, StatsCollectorCounts>::const_iterator it_begin, it_current, it; 00721 it_begin = counts.begin(); 00722 while (it_begin != counts.end()) { 00723 real val_begin = it_begin->first; 00724 it_current = it_begin; 00725 it_current++; 00726 while (it_current != counts.end() && 00727 is_equal(val_begin, it_current->first)) it_current++; 00728 // Merge keys between 'begin' and 'current'. 00729 StatsCollectorCounts sc = it_begin->second; 00730 it = it_begin; 00731 for (it++; it != it_current; it++) { 00732 sc.n += it->second.n; 00733 sc.nbelow += it->second.nbelow; 00734 sc.sum += it->second.sum; 00735 sc.sumsquare += it->second.sumsquare; 00736 } 00737 approximate_counts[val_begin] = sc; 00738 it_begin = it_current; 00739 } 00740 return &approximate_counts; 00741 } 00742 00744 // getBinMapping // 00746 RealMapping StatsCollector::getBinMapping(double discrete_mincount, 00747 double continuous_mincount, 00748 real tolerance, 00749 TVec<double> * fcount) const 00750 { 00751 real mapto=0.; 00752 RealMapping mapping; 00753 mapping.setMappingForOther(-1); 00754 map<real,StatsCollectorCounts>::const_iterator it = counts.begin(); 00755 int nleft = int(counts.size())-1; // loop on all but last 00756 00757 if(fcount) 00758 { 00759 (*fcount) = TVec<double>(); 00760 // ouch, assume discrete_mincount == continuous_mincount 00761 fcount->resize(0, int(2.*nnonmissing_ / discrete_mincount)); 00762 fcount->append(nmissing_); 00763 fcount->append(0); 00764 } 00765 00766 double count = 0, count2 = 0; 00767 real low = min_; 00768 real high = min_; 00769 bool low_has_been_appended = false; 00770 // ProgressBar pb("Computing PseudoQ Mapping...",counts.size()-1); 00771 00772 while(nleft--) 00773 { 00774 high = it->first; 00775 // pb(counts.size()-1-nleft); 00776 count += it->second.nbelow; 00777 count2 += it->second.nbelow; 00778 // cerr << "it->first:"<<it->first<<" nbelow:"<<it->second.nbelow<<" n:"<<it->second.n<<endl; 00779 if(count>=continuous_mincount) 00780 { 00781 // append continuous range 00782 mapping.addMapping( 00783 RealRange(low_has_been_appended?']':'[',low, high, '['), 00784 mapto++); 00785 if(fcount) 00786 fcount->append(count); 00787 low = high; 00788 low_has_been_appended = false; 00789 count = 0; 00790 00791 } 00792 00793 if(it->second.n >= discrete_mincount) 00794 { 00795 if(count>0) // then append the previous continuous range 00796 { 00797 mapping.addMapping(RealRange(low_has_been_appended?']':'[',low, high, '['), mapto++); 00798 if(fcount) 00799 fcount->append(count); 00800 count = 0; 00801 } 00802 // append discrete point 00803 mapping.addMapping(RealRange('[',high,high,']'), mapto++); 00804 if(fcount) 00805 fcount->append(it->second.n + count); 00806 count2+=it->second.n; 00807 count=0; 00808 low = high; 00809 low_has_been_appended = true; 00810 } 00811 else 00812 { 00813 count2+=it->second.n; 00814 count += it->second.n; 00815 } 00816 ++it; 00817 } 00818 00819 if(it->first<=max_) 00820 PLERROR("Bug in StatsCollector::getBinMapping expected last element of mapping to be FLT_MAX..."); 00821 00822 if (mapping.size() == 0) 00823 { 00824 PLWARNING("StatsCollector::getBinMapping: no mapping were created; probably a bug"); 00825 mapping.addMapping(RealRange('[',min_,max_,']'), 0); 00826 return mapping; 00827 } 00828 00829 // make sure 
we include max_ 00830 pair<RealRange, real> m = mapping.lastMapping(); 00831 00832 // cnt is the number of elements that would be in the last bin 00833 double cnt = nnonmissing_ - count2 + count; 00834 00835 // If the bin we're about to add is short of less then tolerance*100% of continuous_mincount elements, 00836 // OR if the last we added is a discrete point AND the max is not already in the last range, we append it 00837 if(m.first.high<max_) 00838 { 00839 if( ((real)cnt/(real)continuous_mincount)>(1.-tolerance) || 00840 (fast_exact_is_equal(m.first.low, m.first.high))) 00841 { 00842 // don't join last bin with last-but-one bin 00843 mapping.addMapping(RealRange(m.first.rightbracket=='[' ? '[' : ']',m.first.high,max_,']'), 00844 mapto++); 00845 if(fcount) 00846 fcount->append(cnt); 00847 } 00848 else 00849 { 00850 // otherwise, we can join it with the previous 00851 mapping.removeMapping(m.first); 00852 mapping.addMapping(RealRange(m.first.leftbracket, m.first.low, max_, ']'), 00853 m.second); 00854 if(fcount) 00855 { 00856 double v = fcount->back(); 00857 fcount->pop_back(); 00858 fcount->append(v+cnt); 00859 } 00860 } 00861 } 00862 else if(fast_exact_is_equal(m.first.high, max_)) // make sure we have a closing bracket on the max_ 00863 { 00864 mapping.removeMapping(m.first); 00865 mapping.addMapping(RealRange(m.first.leftbracket, m.first.low, max_, ']'), 00866 m.second); 00867 } 00868 return mapping; 00869 } 00870 00871 00873 // getAllValuesMapping // 00875 RealMapping StatsCollector::getAllValuesMapping(TVec<double> * fcount) const 00876 { 00877 return getAllValuesMapping(0,fcount); 00878 } 00879 00880 RealMapping StatsCollector::getAllValuesMapping(TVec<bool>* to_be_included, 00881 TVec<double>* fcount, bool ignore_other, 00882 real tolerance) const { 00883 RealMapping mapping; 00884 if (ignore_other) { 00885 mapping.keep_other_as_is = false; 00886 mapping.other_mapsto = -1; 00887 } 00888 int i = 0; 00889 int k = 0; 00890 if(fcount) 00891 { 00892 (*fcount) = TVec<double>(); 00893 fcount->resize(0,int(counts.size())+2); 00894 fcount->append(nmissing_); 00895 fcount->append(0); 00896 } 00897 00898 double count=0; 00899 00900 real epsilon = 0; 00901 if (tolerance > 0) { 00902 // Compute the expansion coefficient 'epsilon'. 00903 StatsCollector values_diff; 00904 for (map<real,StatsCollectorCounts>::const_iterator it = counts.begin(); 00905 size_t(i) < counts.size() - 2; i++) { 00906 real val1 = it->first; 00907 it++; 00908 real val2 = it->first; 00909 values_diff.update(val2 - val1); 00910 } 00911 // Mean of the difference between two consecutive values. 00912 real mean = values_diff.mean(); 00913 epsilon = tolerance * mean; 00914 if (epsilon < 0) { 00915 PLERROR("In StatsCollector::getAllValuesMapping - epsilon < 0, there must be something wrong"); 00916 } 00917 } 00918 00919 i = 0; 00920 00921 for(map<real,StatsCollectorCounts>::const_iterator it = counts.begin() ; 00922 size_t(i) < counts.size() - 1; ++it) 00923 { 00924 real low_val = it->first - epsilon; 00925 real up_val = it->first + epsilon; 00926 map<real,StatsCollectorCounts>::const_iterator itup = it; 00927 itup++; 00928 int j = i + 1; 00929 bool to_include = true; 00930 if (to_be_included) { 00931 to_include = (*to_be_included)[i]; 00932 } 00933 real count_in_range = it->second.n; 00934 if (tolerance > 0) { 00935 for (; itup != counts.end(); itup++) { 00936 if (itup->first - epsilon <= up_val) { 00937 // The next mapping needs to be merged with the current one. 
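// (Two stored values end up in the same range whenever they lie within
// roughly 2*epsilon of each other, epsilon being 'tolerance' times the mean
// gap between consecutive values computed above.)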
00938 if (fcount) { 00939 PLWARNING("In StatsCollector::getAllValuesMapping - You are using fcount and some ranges are merged. " 00940 "This case has not been tested yet. Please remove this warning if it works fine."); 00941 } 00942 up_val = itup->first + epsilon; 00943 count_in_range += itup->second.n; 00944 if (to_be_included) { 00945 // As long as one of the merged mappings needs to be included, 00946 // we include the result of the merge. 00947 to_include = to_include || (*to_be_included)[j]; 00948 } 00949 j++; 00950 } else { 00951 // No merging. 00952 break; 00953 } 00954 } 00955 } 00956 // Because the last one won't be merged (even if all are merged, the one 00957 // with FLT_MAX won't). 00958 itup--; 00959 it = itup; 00960 i = j - 1; 00961 00962 if (to_include) { 00963 mapping.addMapping(RealRange('[',low_val,up_val,']'),k); 00964 k++; 00965 if(fcount) 00966 { 00967 count += count_in_range; 00968 fcount->append(count_in_range); 00969 } 00970 } 00971 i++; 00972 } 00973 00974 if(fcount) 00975 (*fcount)[1] = nnonmissing_ - count; 00976 return mapping; 00977 } 00978 00980 // cdf // 00982 Mat StatsCollector::cdf(bool normalized) const 00983 { 00984 int l = 2*(int)counts.size(); 00985 00986 Mat xy(l+1,2); 00987 int i=0; 00988 double currentcount = 0; 00989 xy(i,0) = min_; 00990 xy(i++,1) = 0; 00991 map<real,StatsCollectorCounts>::const_iterator it = counts.begin(); 00992 map<real,StatsCollectorCounts>::const_iterator itend = counts.end(); 00993 for(; it!=itend; ++it) 00994 { 00995 real val = it->first; 00996 if(val>max_) 00997 val = max_; 00998 00999 currentcount += it->second.nbelow; 01000 xy(i,0) = val; 01001 xy(i++,1) = currentcount; 01002 01003 currentcount += it->second.n; 01004 xy(i,0) = val; 01005 xy(i++,1) = currentcount; 01006 } 01007 if(normalized) 01008 xy.column(1) /= real(nnonmissing_); 01009 01010 return xy; 01011 } 01012 01013 real StatsCollector::pseudo_quantile(real q) const 01014 { 01015 // Basic strategy is to iterate over the bins and stop when the fraction 01016 // of total observations crosses q. Then we linearly interpolate between 01017 // the previous bin and the current one. 
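// A small worked example of the interpolation (hypothetical numbers, purely
// illustrative): with nnonmissing_ = 10 and q = 0.5, if the bin at position
// 2.0 brings the cumulative count to 4 and the next bin at position 3.0
// brings it to 6, the loop stops at 3.0 (since 6/10 >= 0.5) and we return
//     (3.0 - 2.0) / (6 - 4) * (0.5 * 10 - 4) + 2.0 = 2.5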
01018 map<real,StatsCollectorCounts>::const_iterator 01019 it = counts.begin(), end = counts.end(); 01020 real previous_total = 0.0; 01021 real current_total = MISSING_VALUE; 01022 real previous_position = MISSING_VALUE; 01023 if (fast_exact_is_equal(nnonmissing_, 0)) 01024 return MISSING_VALUE; 01025 01026 for ( ; it != end ; ++it ) { 01027 current_total = previous_total + it->second.n + it->second.nbelow; 01028 if (is_missing(current_total) || 01029 current_total / nnonmissing_ >= q) 01030 break; 01031 previous_total = current_total; 01032 previous_position = it->first; 01033 } 01034 01035 // Boudary case if we did not collect any count statistics 01036 if (is_missing(current_total)) 01037 return MISSING_VALUE; 01038 01039 // If we stopped at the first bin, do not interpolate with previous bin 01040 PLASSERT( it != end ); 01041 if (is_missing(previous_position)) 01042 return it->first; 01043 01044 // If we stopped at last bin, do not interpolate with current bin which 01045 // should be equal to FLT_MAX 01046 if (fast_exact_is_equal(it->first, FLT_MAX)) 01047 return previous_position; 01048 01049 // Otherwise, interpolate linearly between previous_position and 01050 // current_position 01051 real current_position = it->first; 01052 real slope = (current_position - previous_position) / 01053 (current_total - previous_total); 01054 return slope * (q * nnonmissing_ - previous_total) + previous_position; 01055 } 01056 01057 void StatsCollector::newwrite(PStream& out) const 01058 { 01059 switch(out.outmode) 01060 { 01061 case PStream::raw_ascii: 01062 case PStream::pretty_ascii: 01063 { 01064 map<real,StatsCollectorCounts>::const_iterator it = counts.begin(); 01065 map<real,StatsCollectorCounts>::const_iterator itend = counts.end(); 01066 for(; it!=itend; ++it) 01067 { 01068 out << "value: " << it->first 01069 << " #equal:" << it->second.n 01070 << " #less:" << it->second.nbelow 01071 << " avg_of_less:" << it->second.sum/it->second.nbelow 01072 << " % of non missing:"<< (real(it->second.n)/nnonmissing()) 01073 << endl; 01074 } 01075 out << "\n# samples: " << n() << "\n"; 01076 out << "# missing: " << nmissing() << "\n"; 01077 out << "mean: " << mean() << "\n"; 01078 out << "stddev: " << stddev() << "\n"; 01079 out << "stderr: " << stderror() << "\n"; 01080 out << "min: " << min() << "\n"; 01081 out << "max: " << max() << "\n\n"; 01082 out << "first: " << first_obs() << "\n"; 01083 out << "last: " << last_obs() << "\n\n"; 01084 out << "counts size: " << (unsigned int) counts.size() << "\n"; 01085 break; 01086 } 01087 default: 01088 inherited::newwrite(out); 01089 } 01090 } 01091 01092 // TODO Remove this (apparently) deprecated method? 
01093 void StatsCollector::oldwrite(ostream& out) const 01094 { 01095 writeHeader(out,"StatsCollector",0); 01096 writeField(out, "nmissing_", nmissing_); 01097 writeField(out, "nnonmissing_", nnonmissing_); 01098 writeField(out, "sum_", sum_); 01099 writeField(out, "sumsquare_", sumsquare_); 01100 writeField(out, "min_", min_); 01101 writeField(out, "max_", max_); 01102 writeField(out, "maxnvalues", maxnvalues); 01103 01104 writeFieldName(out, "counts"); 01105 PLearn::write(out, (int)counts.size()); 01106 writeNewline(out); 01107 map<real,StatsCollectorCounts>::const_iterator it = counts.begin(); 01108 map<real,StatsCollectorCounts>::const_iterator itend = counts.end(); 01109 for(; it!=itend; ++it) 01110 { 01111 PLearn::write(out, it->first); 01112 PLearn::write(out, it->second.n); 01113 PLearn::write(out, it->second.nbelow); 01114 PLearn::write(out, it->second.sum); 01115 PLearn::write(out, it->second.sumsquare); 01116 writeNewline(out); 01117 } 01118 writeFooter(out,"StatsCollector"); 01119 } 01120 01122 // getStat // 01127 real StatsCollector::getStat(const string& statname) const 01128 { 01129 typedef real (StatsCollector::*STATFUN)() const; 01130 static bool init = false; 01131 static map<string,STATFUN> statistics; 01132 if (!init) { 01133 //the two if(!init) is volontary not to acquire a lock at each fct call 01134 #pragma omp critical 01135 if(!init){ 01136 init = true; 01137 statistics["E"] = STATFUN(&StatsCollector::mean); 01138 statistics["V"] = STATFUN(&StatsCollector::variance); 01139 statistics["STDDEV"] = STATFUN(&StatsCollector::stddev); 01140 statistics["STDERROR"] = STATFUN(&StatsCollector::stderror); 01141 statistics["SKEW"] = STATFUN(&StatsCollector::skewness); 01142 statistics["KURT"] = STATFUN(&StatsCollector::kurtosis); 01143 statistics["MIN"] = STATFUN(&StatsCollector::min); 01144 statistics["MAX"] = STATFUN(&StatsCollector::max); 01145 statistics["AGEMIN"] = STATFUN(&StatsCollector::agemin); 01146 statistics["AGEMAX"] = STATFUN(&StatsCollector::agemax); 01147 statistics["RANGE"] = STATFUN(&StatsCollector::range); 01148 statistics["SUM"] = STATFUN(&StatsCollector::sum); 01149 statistics["SUMSQ"] = STATFUN(&StatsCollector::sumsquare); 01150 statistics["FIRST"] = STATFUN(&StatsCollector::first_obs); 01151 statistics["LAST"] = STATFUN(&StatsCollector::last_obs); 01152 statistics["N"] = STATFUN(&StatsCollector::n); 01153 statistics["NMISSING"] = STATFUN(&StatsCollector::nmissing); 01154 statistics["NNONMISSING"] = STATFUN(&StatsCollector::nnonmissing); 01155 statistics["SHARPERATIO"] = STATFUN(&StatsCollector::sharperatio); 01156 statistics["EoverSKEW"] = STATFUN(&StatsCollector::mean_over_skewness); 01157 statistics["EoverSKEWms"] = STATFUN(&StatsCollector::mean_over_skewness_ms); 01158 statistics["EoverKURT"] = STATFUN(&StatsCollector::mean_over_kurtosis); 01159 statistics["ZSTAT"] = STATFUN(&StatsCollector::zstat); 01160 statistics["PZ1t"] = STATFUN(&StatsCollector::zpr1t); 01161 statistics["PZ2t"] = STATFUN(&StatsCollector::zpr2t); 01162 statistics["IQR"] = STATFUN(&StatsCollector::iqr); 01163 statistics["PRR"] = STATFUN(&StatsCollector::prr); 01164 statistics["NIPS_LIFT"] = STATFUN(&StatsCollector::nips_lift); 01165 statistics["MEAN_LIFT"] = STATFUN(&StatsCollector::mean_lift); 01166 statistics["PRBP"] = STATFUN(&StatsCollector::prbp); 01167 statistics["DMODE"] = STATFUN(&StatsCollector::dmode); 01168 } 01169 } 01170 01171 // Special case :: interpret the PSEUDOQ(xx) and LIFT(xxx) forms 01172 if (statname.substr(0,7) == "PSEUDOQ") { 01173 PStream in = 
openString(statname, PStream::plearn_ascii); 01174 string dummy; 01175 in.smartReadUntilNext("(", dummy); 01176 string quantile_str; 01177 in.smartReadUntilNext(")", quantile_str); 01178 real q = toreal(quantile_str); 01179 return pseudo_quantile(q); 01180 } else if (statname.substr(0, 5) == "LIFT(") { 01181 PStream in = openString(statname, PStream::plearn_ascii); 01182 string dummy; 01183 in.smartReadUntilNext("(", dummy); 01184 string fraction_str; 01185 in.smartReadUntilNext(")", fraction_str); 01186 real fraction = toreal(fraction_str); 01187 int dummy_int; 01188 return -100 * lift(int(round(fraction * nnonmissing())), dummy_int); 01189 } 01190 01191 map<string,STATFUN>::iterator fun = statistics.find(statname); 01192 if (fun == statistics.end()) 01193 PLERROR("In StatsCollector::getStat, invalid statname '%s'", 01194 statname.c_str()); 01195 else 01196 return (this->*(fun->second))(); 01197 return 0; 01198 } 01199 01201 // skewness // 01203 real StatsCollector::skewness() const 01204 { 01205 // numerator 01206 double diff = first_ - mean(); 01207 double numerator = sumcube_/nnonmissing_ + 01208 (3*sumsquare_/nnonmissing_ + diff*(3*(sum_/nnonmissing_) + diff))*diff; 01209 01210 // denominator 01211 double denominator = stddev(); 01212 denominator *= denominator * denominator; 01213 return numerator / denominator; 01214 } 01215 01217 // kurtosis // 01219 real StatsCollector::kurtosis() const 01220 { 01221 // numerator 01222 double diff = first_ - mean(); 01223 double numerator = sumfourth_/nnonmissing_ + 01224 (4*sumcube_/nnonmissing_ + 01225 (6*sumsquare_/nnonmissing_ + diff*(4*sum_/nnonmissing_+diff)) * diff) 01226 * diff; 01227 01228 // denominator 01229 double denominator = stddev(); 01230 denominator *= denominator; 01231 denominator *= denominator; 01232 return numerator / denominator - 3.0; 01233 } 01234 01236 // sharperatio // 01238 real StatsCollector::sharperatio() const 01239 { 01240 // Be careful because due to numerical errors, it is possible to get data 01241 // series with extremely small returns and standard deviations, where we 01242 // would be expecting a SharpeRatio of "exactly" 0.0. 01243 real m = mean(); 01244 real s = stddev(); 01245 if (is_missing(m) || is_missing(s)) 01246 return MISSING_VALUE; 01247 else if (is_equal(m, 0.0) || is_equal(s, 0.0)) 01248 return 0.0; 01249 else 01250 return m/s; 01251 } 01252 01254 // mean_over_skewness_ms // 01256 real StatsCollector::mean_over_skewness_ms() const 01257 { 01258 real m = mean(); 01259 real s = skewness(); 01260 if (m > 0 && s > 0) 01261 return m / s; 01262 else 01263 return - fabs(m / s); 01264 } 01265 01267 // lift // 01269 real StatsCollector::lift(int k, int& n_pos_in_k, int n_pos_in_k_minus_1, real pos_fraction) const 01270 { 01271 if (more_than_maxnvalues) 01272 PLWARNING("In StatsCollector::lift - You need to increase 'maxnvalues'" 01273 " (or set it to -1) to get an accurate statistic"); 01274 if (k <= 0) 01275 PLERROR("In StatsCollector::lift - It makes no sense to compute a lift with k <= 0"); 01276 if (!sorted) 01277 sort_values_by_magnitude(); 01278 if (n_pos_in_k_minus_1 < 0) 01279 // We are not given the number of positive examples in the first (k-1) 01280 // examples, thus we need to compute it ourselves. 01281 n_pos_in_k = int(round(PLearn::sum(sorted_values.subMat(0, 1, k, 1)))); 01282 else 01283 n_pos_in_k = n_pos_in_k_minus_1 + int(sorted_values(k - 1, 1)); 01284 if (pos_fraction < 0) 01285 // We are not given the fraction of positive examples. 
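// It is recomputed here from the stored 0/1 indicator column; callers that
// evaluate lift() for many values of k (e.g. mean_lift()) pass it in so that
// it is not recomputed at every call.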
01286 pos_fraction = int(round(PLearn::sum(sorted_values.column(1)))) / real(sorted_values.length()); 01287 return real(n_pos_in_k) / (k * pos_fraction); 01288 } 01289 01291 // nips_lift // 01293 real StatsCollector::nips_lift() const 01294 { 01295 real pos_fraction; 01296 real result = - mean_lift(&pos_fraction); 01297 real max_performance = 0.5 * (1 / pos_fraction - 1) * (pos_fraction + 1) + 1; 01298 result = (max_performance - result) / max_performance; 01299 return result; 01300 } 01301 01303 // mean_lift // 01305 real StatsCollector::mean_lift(real* pos_fraction) const 01306 { 01307 if (more_than_maxnvalues) 01308 PLWARNING("In StatsCollector::mean_lift - You need to increase " 01309 "'maxnvalues' (or set it to -1) to get an accurate " 01310 "statistic"); 01311 if (!sorted) 01312 sort_values_by_magnitude(); 01313 real n_total = real(sorted_values.length()); 01314 real pos_f = int(round(PLearn::sum(sorted_values.column(1)))) / n_total; 01315 if (pos_fraction) 01316 *pos_fraction = pos_f; 01317 int n_pos_in_k_minus_1 = -1; 01318 real result = 0; 01319 for (int k = 0; k < sorted_values.length(); k++) 01320 result += lift(k + 1, n_pos_in_k_minus_1, n_pos_in_k_minus_1, pos_f); 01321 result /= n_total; 01322 return -result; 01323 } 01324 01326 // prbp // 01328 real StatsCollector::prbp() const 01329 { 01330 if (more_than_maxnvalues) 01331 PLWARNING("In StatsCollector::prbp - You need to increase 'maxnvalues'" 01332 " (or set it to -1) to get an accurate statistic"); 01333 if (!sorted) 01334 sort_values_by_magnitude(); 01335 int n_pos = int(round(PLearn::sum(sorted_values.column(1)))); 01336 int n_pos_at_prbp = int(round(PLearn::sum(sorted_values.subMat(0, 1, n_pos, 1)))); 01337 return - 100 * n_pos_at_prbp / real(n_pos); 01338 } 01339 01340 01342 // dmode // 01344 real StatsCollector::dmode() const 01345 { 01346 Vec ret = dmodes(); 01347 if(ret.length() == 0) 01348 return MISSING_VALUE; 01349 return ret[0]; 01350 } 01351 01352 Vec StatsCollector::dmodes() const 01353 { 01354 Vec cargmax(0); 01355 real cmax = -1; 01356 01357 map<real,StatsCollectorCounts>::const_iterator it = counts.begin(); 01358 map<real,StatsCollectorCounts>::const_iterator itend = counts.end(); 01359 for(; it!=itend; ++it) 01360 { 01361 if(it->second.n > cmax) 01362 cmax = it->second.n; 01363 } 01364 01365 it = counts.begin(); 01366 for(; it!=itend; ++it) 01367 { 01368 if(fast_exact_is_equal(it->second.n, cmax)) 01369 cargmax.push_back(it->first); 01370 } 01371 01372 return cargmax; 01373 } 01374 01376 // sort_values_by_magnitude // 01378 void StatsCollector::sort_values_by_magnitude() const 01379 { 01380 sorted_values.resize(0, 2); 01381 Vec to_add(2); 01382 real val; 01383 for (map<real,StatsCollectorCounts>::const_iterator it = counts.begin(); 01384 it != counts.end(); it++) { 01385 val = it->first; 01386 to_add[0] = fabs(val); 01387 to_add[1] = val > 0 ? 1 : 0; 01388 for (int i = 0; i < it->second.n; i++) 01389 sorted_values.appendRow(to_add); 01390 } 01391 // The STL map may have somehow performed some kind of sort, which could 01392 // lead to a very specific sort when some predictions are equal (instead of 01393 // a random one). Thus we make sure everything is shuffled first. 01394 shuffleRows(sorted_values); 01395 sortRows(sorted_values, 0, false); // Sort by decreasing order of first column. 
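// At this point each (repeated) observation occupies one row of sorted_values:
// column 0 holds |v_i|, now sorted in decreasing order, and column 1 holds the
// 0/1 positive-class indicator; this is the layout consumed by lift(),
// mean_lift() and prbp().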
01396 sorted = true; 01397 } 01398 01400 // computeRanges // 01402 TVec<RealMapping> computeRanges(TVec<StatsCollector> stats, int discrete_mincount, int continuous_mincount) 01403 { 01404 TVec<RealMapping> ranges; 01405 int n = stats.length(); 01406 ranges.resize(n); 01407 for(int k=0; k<n; k++) 01408 ranges[k] = stats[k].getBinMapping(discrete_mincount, continuous_mincount); 01409 return ranges; 01410 } 01411 01412 real StatsCollector::zpr1t() const 01413 { 01414 real m = mean(), v = variance(); 01415 if (is_missing(m) || is_missing(v)) 01416 return MISSING_VALUE; 01417 else 01418 return p_value(mean(), variance()/nnonmissing()); 01419 } 01420 01421 real StatsCollector::zpr2t() const 01422 { 01423 return 2 * zpr1t(); 01424 } 01425 01426 void StatsCollector::merge(const StatsCollector& other) 01427 { 01428 if(storeCounts() && other.maxnvalues != -1) 01429 PLERROR("Cannot merge stats collectors w/counts if 'other' stats col. has maxnvalues != -1"); 01430 01431 if(fast_exact_is_equal(nnonmissing_,0)) // this was empty before merge 01432 { 01433 min_= other.min_; 01434 max_= other.max_; 01435 first_= other.first_; 01436 last_= other.last_; 01437 } 01438 01439 sum_+= other.sum() - first_*other.nnonmissing_; 01440 double first2= first_*first_; 01441 sumsquare_+= other.sumsquare() - 2.0*first_*other.sum() + first2*other.nnonmissing_; 01442 double ofirst2= other.first_*other.first_; 01443 double osum3= other.sumcube_ + 3.0*other.first_*other.sumsquare() 01444 - 3.0*ofirst2*other.sum() + ofirst2*other.first_*other.nnonmissing_; 01445 sumcube_+= osum3 - 3.0*first_*other.sumsquare() 01446 + 3.0*first2*other.sum() - first2*first_*other.nnonmissing_; 01447 double osum4= other.sumfourth_ + 4.0*other.first_*osum3 - 6.0*ofirst2*other.sumsquare() 01448 + 4.0*other.first_*ofirst2*other.sum() - ofirst2*ofirst2*other.nnonmissing_; 01449 sumfourth_+= osum4 - 4.0*first_*osum3 + 6.0*first2*other.sumsquare() 01450 - 4.0*first_*first2*other.sum() + first2*first2*other.nnonmissing_; 01451 01452 nmissing_+= other.nmissing_; 01453 nnonmissing_+= other.nnonmissing_; 01454 sumsquarew_+= other.sumsquarew_; 01455 01456 // In merging first/last/ages, we assume that 'this' comes first, and 01457 // 'other' comes last. 01458 if (other.min_ < min_) { 01459 min_ = other.min_; 01460 agemin_ = other.agemin_; 01461 } 01462 else { 01463 agemin_ += other.n(); 01464 } 01465 01466 if (other.max_ > max_) { 01467 max_ = other.max_; 01468 agemax_ = other.agemax_; 01469 } 01470 else { 01471 agemax_ += other.n(); 01472 } 01473 last_= other.last_; // assume this is first and other is last. 
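// Note on the accumulator updates a few lines above: assuming sum(),
// sumsquare(), etc. return the raw (uncentered) sums, as their documentation
// suggests, the merge simply re-centers 'other''s accumulators around this
// object's first_ via the binomial expansions, e.g. with a = first_:
//     sum (x - a)   = sum x   - n*a
//     sum (x - a)^2 = sum x^2 - 2*a*sum x + n*a^2
// and similarly for the third and fourth powers.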
01474 sorted = false; 01475 01476 if (storeCounts())//now merge counts 01477 { 01478 int nextid= 0; 01479 set<real> already_merged; 01480 map<real,StatsCollectorCounts>::iterator it; 01481 map<real,StatsCollectorCounts>::const_iterator ito; 01482 map<int, real>::const_iterator iti; 01483 while(nextid < int(other.counts.size()) && (maxnvalues == -1 || int(counts.size()) <= maxnvalues)) 01484 {// merge counts with smallest ids until maxnvalues reached 01485 01486 iti= other.count_ids.find(nextid); 01487 if(iti == other.count_ids.end()) 01488 { 01489 PLWARNING("Can't find count id %d", nextid); 01490 break; 01491 } 01492 real val= iti->second; 01493 ito= other.counts.find(val); 01494 if(ito == other.counts.end()) 01495 { 01496 PLWARNING("Can't find count id %d, val %f", nextid, val); 01497 break; 01498 } 01499 01500 int newid= int(counts.size()); 01501 01502 it= counts.find(val); 01503 if(it != counts.end()) 01504 it->second.merge(ito->second); 01505 else 01506 { 01507 counts[val]= ito->second; 01508 counts[val].id= newid; 01509 count_ids[newid]= val; 01510 } 01511 ++nextid; 01512 already_merged.insert(val); 01513 } 01514 01515 for(ito= other.counts.begin(); ito != other.counts.end(); ++ito) 01516 { 01517 real val= ito->first; 01518 if(already_merged.count(val) == 0)//skip those merged earlier 01519 { 01520 it= counts.find(val); 01521 if(it != counts.end()) 01522 it->second.merge(ito->second); 01523 else if(maxnvalues == -1 || int(counts.size()) <= maxnvalues) 01524 { 01525 int id= int(counts.size()); 01526 counts[val]= ito->second; 01527 counts[val].id= id; 01528 count_ids[id]= val; 01529 } 01530 else 01531 { 01532 more_than_maxnvalues= true; 01533 it= counts.lower_bound(val); 01534 real weight= ito->second.n; 01535 it->second.nbelow+= ito->second.nbelow + weight; 01536 it->second.sum+= val*weight;//ito->second.sum; 01537 it->second.sumsquare+= val*val*weight;//ito->second.sumsquare; 01538 } 01539 } 01540 } 01541 } 01542 if (!approximate_counts.empty()) approximate_counts.clear(); 01543 } 01544 01545 void StatsCollector::calculate_binary_integer() 01546 { 01547 if(binary_==-1 && maxnvalues!=0 && nnonmissing_>0) 01548 { 01549 PLCHECK(integer_==-1); 01550 binary_ = true; 01551 integer_ = true; 01552 for(map<real, StatsCollectorCounts>::iterator it = counts.begin(); 01553 it!=counts.end();it++) 01554 { 01555 if(it->second.n!=0) 01556 { 01557 if(!(fast_exact_is_equal(it->first,0)|| 01558 fast_exact_is_equal(it->first,1))) 01559 binary_ = false; 01560 if(!fast_exact_is_equal(int(round(it->first)),it->first)){ 01561 integer_ = false; 01562 break; 01563 } 01564 } 01565 } 01566 if((binary_||integer_)&&more_than_maxnvalues) 01567 PLWARNING("In StatsCollector::calculate_binary_integer() - " 01568 "Reloading an old StatsCollector. While recalculating data for isbinary() and isinteger(), we found a possible error case. The StatsCollector have more value then maxnvalues(%d), but we are still thinking it is a binary or an integer. This can be false.",maxnvalues); 01569 } 01570 else if(maxnvalues==0 && nnonmissing()>0 && -1==binary_ && -1==integer_) 01571 PLWARNING("In StatsCollector::calculate_binary_integer() - " 01572 "Reloadind old StatsCollector with maxnvalues==0 and " 01573 "nnonmissing()>0. This cause trouble as we can't recompute" 01574 "the data for the function isbinary() and isinteger()" 01575 ); 01576 } 01577 } // end of namespace PLearn 01578 01579 01580 /* 01581 Local Variables: 01582 mode:c++ 01583 c-basic-offset:4 01584 c-file-style:"stroustrup" 01585 c-file-offsets:((innamespace . 
0)(inline-open . 0)) 01586 indent-tabs-mode:nil 01587 fill-column:79 01588 End: 01589 */ 01590 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
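/*
  Illustrative usage sketch (not part of the library build): a minimal example
  of driving a StatsCollector, assuming the weight argument of update()
  defaults to 1 and that getStat() and pseudo_quantile() behave as documented
  at the top of this file.

      StatsCollector sc(-1);               // -1: keep all distinct values
      for (int i = 0; i < 100; i++)
          sc.update(real(i % 10));         // feed a simple repeating series
      real m   = sc.getStat("E");          // sample mean
      real sd  = sc.getStat("STDDEV");     // sample standard deviation
      real med = sc.pseudo_quantile(0.5);  // pseudo-median (needs maxnvalues != 0)
*/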