// -*- C++ -*-

// PLS.cc
//
// Copyright (C) 2004 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: PLS.cc 7042 2007-05-09 23:44:20Z saintmlx $
 ******************************************************* */

// Authors: Olivier Delalleau

#define PL_LOG_MODULE_NAME "PLS"

// From PLearn
#include "PLS.h"
#include <plearn/io/pl_log.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/math/pl_erf.h>
#include <plearn/math/plapack.h>
#include <plearn/vmat/ShiftAndRescaleVMatrix.h>
#include <plearn/vmat/SubVMatrix.h>
#include <plearn/vmat/VMat_linalg.h>

namespace PLearn {
using namespace std;

PLS::PLS()
    : m(-1),
      p(-1),
      k(1),
      method("kernel"),
      precision(1e-6),
      output_the_score(false),
      output_the_target(true),
      compute_confidence(false)
{}

PLEARN_IMPLEMENT_OBJECT(
    PLS,
    "Partial Least Squares Regression (PLSR).",
    "You can use this learner to perform regression and/or dimensionality\n"
    "reduction.\n"
    "PLS regression assumes the target Y and the data X are linked through:\n"
    " Y = T.Q' + E\n"
    " X = T.P' + F\n"
    "The underlying coefficients T (the 'scores') and the loading matrices\n"
    "Q and P are sought. It is then possible to compute the prediction y for\n"
    "a new input x, as well as its score vector t (its representation in\n"
    "lower-dimensional coordinates).\n"
    "The available algorithms to perform PLS (chosen by the 'method' option) are:\n"
    "\n"
    " ====  PLS1  ====\n"
    "The classical PLS algorithm, suitable only for a 1-dimensional target. The\n"
    "following algorithm is taken from 'Factor Analysis in Chemistry', with an\n"
    "additional loop that (I believe) was missing:\n"
    " (1) Let X (n x p) = the centered and normalized input data\n"
    "     Let y (n x 1) = the centered and normalized target data\n"
    "     Let k be the number of components extracted\n"
    " (2) s = y\n"
    " (3) lx' = s' X, s = X lx (normalized)\n"
    " (4) If s has changed by more than 'precision', loop to (3)\n"
    " (5) ly = s' y\n"
    " (6) lx' = s' X\n"
    " (7) Store s, lx and ly in the columns of respectively T, P and Q\n"
    " (8) X = X - s lx', y = y - s ly, loop to (2) k times\n"
    " (9) Set W = (T P')^(+) T, where ^(+) denotes the right pseudoinverse\n"
    "\n"
    " ==== Kernel ====\n"
    "The code implements a NIPALS-PLS-like algorithm, which is a so-called\n"
    "'kernel' algorithm (faster than more classical implementations).\n"
    "The algorithm, inspired from 'Factor Analysis in Chemistry' and above all\n"
    "www.statsoftinc.com/textbook/stpls.html, is the following:\n"
    " (1) Let X (n x p) = the centered and normalized input data\n"
    "     Let Y (n x m) = the centered and normalized target data\n"
    "     Let k be the number of components extracted\n"
    " (2) Initialize A_0 = X'Y, M_0 = X'X, C_0 = Identity(p), and h = 0\n"
    " (3) q_h = the eigenvector of B_h = A_h' A_h with the largest eigenvalue,\n"
    "     found by the NIPALS method:\n"
    "       (3.a) q_h = a (normalized) random column of B_h\n"
    "       (3.b) q_h = B_h q_h\n"
    "       (3.c) normalize q_h\n"
    "       (3.d) if q_h has changed by more than 'precision', go to (3.b)\n"
    " (4) w_h = C_h A_h q_h, normalize w_h and store it in a column of W (p x k)\n"
    " (5) p_h = M_h w_h, c_h = w_h' p_h, p_h = p_h / c_h and store it in a column\n"
    "     of P (p x k)\n"
    " (6) q_h = A_h' w_h / c_h, and store it in a column of Q (m x k)\n"
    " (7) A_h+1 = A_h - c_h p_h q_h'\n"
    "     M_h+1 = M_h - c_h p_h p_h'\n"
    "     C_h+1 = C_h - w_h p_h'\n"
    " (8) h = h+1, and if h < k, go to (3)\n"
    "\n"
    "The result is then given by:\n"
    " - Y = X B, with B (p x m) = W Q'\n"
    " - T = X W, where T is the score (reduced coordinates)\n"
    "\n"
    "You can choose to have the score (T) and/or the target (Y) in the output\n"
    "of the learner (default is target only, i.e. regression)."
    );

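// A minimal usage sketch (hypothetical driver code, not part of this file),
// relying only on the PLearner interface implemented below; 'train_vmat' and
// 'test_input' are assumed to be supplied by the caller:
//
//     PP<PLS> pls = new PLS();
//     pls->k = 2;                        // number of components to extract
//     pls->method = "kernel";            // or "pls1" when targetsize() == 1
//     pls->setTrainingSet(train_vmat);   // a VMat with inputsize and
//                                        // targetsize properly set
//     pls->build();
//     pls->train();
//     Vec output(pls->outputsize());
//     pls->computeOutput(test_input, output); // prediction of the target
//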
////////////////////
// declareOptions //
////////////////////
void PLS::declareOptions(OptionList& ol)
{
    // Build options.

    declareOption(ol, "k", &PLS::k, OptionBase::buildoption,
                  "The number of components (factors) computed.");

    declareOption(ol, "method", &PLS::method, OptionBase::buildoption,
                  "The PLS algorithm used ('pls1' or 'kernel', see help for more details).\n");

    declareOption(ol, "precision", &PLS::precision, OptionBase::buildoption,
                  "The precision to which we compute the eigenvectors.");

    declareOption(ol, "output_the_score", &PLS::output_the_score, OptionBase::buildoption,
                  "If set to 1, then the score (the low-dimensional representation of the input)\n"
                  "will be included in the output (before the target).");

    declareOption(ol, "output_the_target", &PLS::output_the_target, OptionBase::buildoption,
                  "If set to 1, then (the prediction of) the target will be included in the\n"
                  "output (after the score).");

    declareOption(ol, "compute_confidence", &PLS::compute_confidence,
                  OptionBase::buildoption,
                  "If set to 1, the variance of the residuals on the training set is\n"
                  "computed after training in order to allow the computation of confidence\n"
                  "intervals.  In the current implementation, this entails performing another\n"
                  "traversal of the training set.");

    // Learnt options.

    declareOption(ol, "B", &PLS::B, OptionBase::learntoption,
                  "The regression matrix in Y = X.B + E.");

    declareOption(ol, "m", &PLS::m, OptionBase::learntoption,
                  "Used to store the target size.");

    declareOption(ol, "mean_input", &PLS::mean_input, OptionBase::learntoption,
                  "The mean of the input data X.");

    declareOption(ol, "mean_target", &PLS::mean_target, OptionBase::learntoption,
                  "The mean of the target data Y.");

    declareOption(ol, "p", &PLS::p, OptionBase::learntoption,
                  "Used to store the input size.");

    declareOption(ol, "stddev_input", &PLS::stddev_input, OptionBase::learntoption,
                  "The standard deviation of the input data X.");

    declareOption(ol, "stddev_target", &PLS::stddev_target, OptionBase::learntoption,
                  "The standard deviation of the target data Y.");

    declareOption(ol, "W", &PLS::W, OptionBase::learntoption,
                  "The regression matrix in T = X.W.");

    declareOption(ol, "resid_variance", &PLS::resid_variance, OptionBase::learntoption,
                  "Estimate of the residual variance for each output variable.  Saved as a\n"
                  "learned option to allow outputting confidence intervals when the model is\n"
                  "reloaded and used in test mode.  These are saved only if the option\n"
                  "'compute_confidence' is true at train-time.");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void PLS::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void PLS::build_()
{
    if (train_set) {
        this->m = train_set->targetsize();
        this->p = train_set->inputsize();
        mean_input.resize(p);
        stddev_input.resize(p);
        mean_target.resize(m);
        stddev_target.resize(m);
        if (train_set->weightsize() > 0) {
            PLWARNING("In PLS::build_ - The train set has weights, but the optimization algorithm won't use them");
        }
        // Check method consistency.
        if (method == "pls1") {
            // Make sure the target is 1-dimensional.
            if (m != 1) {
                PLERROR("In PLS::build_ - With the 'pls1' method, the target should be 1-dimensional");
            }
        } else if (method == "kernel") {
            // Everything should be ok.
        } else {
            PLERROR("In PLS::build_ - Unknown value for option 'method'");
        }
    }
    if (!output_the_score && !output_the_target) {
        // Weird: no output was requested at all.
        PLWARNING("In PLS::build_ - There will be no output");
    }
}

/////////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////////
void PLS::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                  const Vec& target, Vec& costs) const
{
    // No cost computed.
}

///////////////////
// computeOutput //
///////////////////
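//! The output is the score t = W'x of the normalized input (k values,
//! present if 'output_the_score'), followed by the denormalized prediction
//! of the target (m values, present if 'output_the_target').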
void PLS::computeOutput(const Vec& input, Vec& output) const
{
    static Vec input_copy;
    if (W.width() == 0)
        PLERROR("PLS::computeOutput: the model was not trained!");
    // Compute the output from the input.
    int nout = outputsize();
    output.resize(nout);
    // First normalize the input.
    input_copy.resize(this->p);
    input_copy << input;
    input_copy -= mean_input;
    input_copy /= stddev_input;
    int target_start = 0;
    if (output_the_score) {
        transposeProduct(output.subVec(0, this->k), W, input_copy);
        target_start = this->k;
    }
    if (output_the_target) {
        if (this->m > 0) {
            Vec target = output.subVec(target_start, this->m);
            transposeProduct(target, B, input_copy);
            target *= stddev_target;
            target += mean_target;
        } else {
            // This is just a safety check, since it should never happen.
            PLWARNING("In PLS::computeOutput - You asked to output the target but the target size is <= 0");
        }
    }
}


///////////////////////////////////
// computeConfidenceFromOutput //
///////////////////////////////////

bool PLS::computeConfidenceFromOutput(const Vec&, const Vec& output, real probability,
                                      TVec< pair<real,real> >& intervals) const
{
    // Must figure out where the real output starts within the output vector.
    if (! output_the_target)
        PLERROR("PLS::computeConfidenceFromOutput: the option 'output_the_target' "
                "must be enabled in order to compute confidence intervals");
    int ostart = (output_the_score? k : 0);
    Vec regr_output = output.subVec(ostart, m);

    if (m != resid_variance.size())
        PLERROR("PLS::computeConfidenceFromOutput: residual variance not yet computed "
                "or its size (= %d) does not match the output size (= %d)",
                resid_variance.size(), m);

    // Two-tailed interval.
    const real multiplier = gauss_01_quantile((1+probability)/2);
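    // For instance, probability = 0.95 gives gauss_01_quantile(0.975),
    // i.e. a half-width of about 1.96 residual standard deviations.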
    intervals.resize(m);
    for (int i=0; i<m; ++i) {
        real half_width = multiplier * sqrt(resid_variance[i]);
        intervals[i] = std::make_pair(regr_output[i] - half_width,
                                      regr_output[i] + half_width);
    }
    return true;
}


////////////
// forget //
////////////
void PLS::forget()
{
    stage = 0;
    // Free memory.
    B = Mat();
    W = Mat();
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> PLS::getTestCostNames() const
{
    // No cost computed.
    TVec<string> t;
    return t;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> PLS::getTrainCostNames() const
{
    // No cost computed.
    TVec<string> t;
    return t;
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void PLS::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // Deep-copy all "pointer-like" fields, so that the copy does not share
    // its data with the original object.
    deepCopyField(B, copies);
    deepCopyField(mean_input, copies);
    deepCopyField(mean_target, copies);
    deepCopyField(stddev_input, copies);
    deepCopyField(stddev_target, copies);
    deepCopyField(W, copies);
    deepCopyField(resid_variance, copies);
}

///////////////////////
// NIPALSEigenvector //
///////////////////////
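//! Finds the leading eigenvector of the symmetric matrix 'm' by power
//! iteration: repeatedly set v <- m v and renormalize, stopping once no
//! coordinate of v changes by more than 'precision'.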
void PLS::NIPALSEigenvector(const Mat& m, Vec& v, real precision) {
    int n = v.length();
    Vec w(n);
    v << m.column(0);
    normalize(v, 2.0);
    bool ok = false;
    while (!ok) {
        w << v;
        product(v, m, w);
        normalize(v, 2.0);
        ok = true;
        for (int i = 0; i < n && ok; i++) {
            if (fabs(v[i] - w[i]) > precision) {
                ok = false;
            }
        }
    }
}

////////////////
// outputsize //
////////////////
int PLS::outputsize() const
{
    int os = 0;
    if (output_the_score) {
        os += this->k;
    }
    if (output_the_target && m >= 0) {
        // If m < 0, the target size is not yet known, so we should not
        // report it here.
        os += this->m;
    }
    return os;
}

///////////
// train //
///////////
void PLS::train()
{
    if (stage == 1) {
        // Already trained.
        MODULE_LOG << "Skipping PLS training" << endl;
        return;
    }
    MODULE_LOG << "PLS training started" << endl;

    // Construct the centered and normalized training set, for the input
    // as well as the target part.
    DBG_MODULE_LOG << "Normalization of the data" << endl;
    VMat input_part = new SubVMatrix(train_set,
                                     0, 0,
                                     train_set->length(),
                                     train_set->inputsize());
    VMat target_part = new SubVMatrix(train_set,
                                      0, train_set->inputsize(),
                                      train_set->length(),
                                      train_set->targetsize());

    PP<ShiftAndRescaleVMatrix> X_vmat =
        new ShiftAndRescaleVMatrix(input_part, true);
    X_vmat->verbosity = this->verbosity;
    mean_input << X_vmat->shift;
    stddev_input << X_vmat->scale;
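    // ShiftAndRescaleVMatrix stores shift = -mean and scale = 1/stddev, so
    // negating and inverting (here, and below for the target) recovers the
    // means and standard deviations of the training data.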
    negateElements(mean_input);
    invertElements(stddev_input);

    PP<ShiftAndRescaleVMatrix> Y_vmat =
        new ShiftAndRescaleVMatrix(target_part, target_part->width(), true);
    Y_vmat->verbosity = this->verbosity;
    mean_target << Y_vmat->shift;
    stddev_target << Y_vmat->scale;
    negateElements(mean_target);
    invertElements(stddev_target);

    // Some common initialization.
    W.resize(p, k);
    Mat P(p, k);
    Mat Q(m, k);
    int n = X_vmat->length();
    VMat X_vmatrix = static_cast<ShiftAndRescaleVMatrix*>(X_vmat);
    VMat Y_vmatrix = static_cast<ShiftAndRescaleVMatrix*>(Y_vmat);

    if (method == "kernel") {
        // Initialize the various coefficients.
        DBG_MODULE_LOG << "Initialization of the coefficients" << endl;
        Vec ph(p);
        Vec qh(m);
        Vec wh(p);
        Vec tmp(p);
        real ch;
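        // Step (2) of the algorithm above: A_0 = X'Y, M_0 = X'X, C_0 = I(p).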
        Mat Ah = transposeProduct(X_vmatrix, Y_vmatrix);
        Mat Mh = transposeProduct(X_vmatrix, X_vmatrix);
        Mat Ch(p,p);    // Initialized to Identity(p).
        Mat Ah_t_Ah;
        Mat update_Ah(p,m);
        Mat update_Mh(p,p);
        Mat update_Ch(p,p);
        for (int i = 0; i < p; i++) {
            for (int j = i+1; j < p; j++) {
                Ch(i,j) = Ch(j,i) = 0;
            }
            Ch(i,i) = 1;
        }

        // Iterate k times to find the first k factors.
        PP<ProgressBar> pb(
            report_progress? new ProgressBar("Computing the PLS components", k)
            : 0);

        for (int h = 0; h < this->k; h++) {
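            // Step (3): q_h = leading eigenvector of B_h = A_h' A_h, found by
            // NIPALS power iteration; when m == 1, B_h is 1x1 so q_h = 1.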
            Ah_t_Ah = transposeProduct(Ah,Ah);
            if (m == 1) {
                // No need to compute the eigenvector.
                qh[0] = 1;
            } else {
                NIPALSEigenvector(Ah_t_Ah, qh, precision);
            }
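            // Step (4): w_h = C_h A_h q_h, normalized and stored in W.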
            product(tmp, Ah, qh);
            product(wh, Ch, tmp);
            normalize(wh, 2.0);
            W.column(h) << wh;
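            // Step (5): p_h = M_h w_h, c_h = w_h' p_h, p_h /= c_h; stored in P.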
            product(ph, Mh, wh);
            ch = dot(wh, ph);
            ph /= ch;
            P.column(h) << ph;
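            // Step (6): q_h = A_h' w_h / c_h, stored in Q.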
            transposeProduct(qh, Ah, wh);
            qh /= ch;
            Q.column(h) << qh;
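            // Step (7): rank-one downdates of A, M and C:
            // A_{h+1} = A_h - c_h p_h q_h', M_{h+1} = M_h - c_h p_h p_h',
            // C_{h+1} = C_h - w_h p_h'.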
            Mat ph_mat(p, 1, ph);
            Mat qh_mat(m, 1, qh);
            Mat wh_mat(p, 1, wh);
            update_Ah = productTranspose(ph_mat, qh_mat);
            update_Ah *= ch;
            Ah -= update_Ah;
            update_Mh = productTranspose(ph_mat, ph_mat);
            update_Mh *= ch;
            Mh -= update_Mh;
            update_Ch = productTranspose(wh_mat, ph_mat);
            Ch -= update_Ch;
            if (pb)
                pb->update(h + 1);
        }
    } else if (method == "pls1") {
        Vec s(n);
        Vec old_s(n);
        Vec y(n);
        Vec lx(p);
        Vec ly(1);
        Mat T(n,k);
        Mat X = X_vmatrix->toMat();
        y << Y_vmatrix->toMat();

        PP<ProgressBar> pb(
            report_progress? new ProgressBar("Computing the PLS components", k)
            : 0);

        for (int h = 0; h < k; h++) {
            if (pb)
                pb->update(h);
            s << y;
            normalize(s, 2.0);
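            // Steps (3)-(4): iterate lx' = s'X, s = X lx (normalized) until
            // s changes by less than 'precision'.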
            bool finished = false;
            while (!finished) {
                old_s << s;
                transposeProduct(lx, X, s);
                product(s, X, lx);
                normalize(s, 2.0);
                if (dist(old_s, s, 2) < precision) {
                    finished = true;
                }
            }
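            // Steps (5)-(7): ly = s'y, lx' = s'X; store s, lx and ly in the
            // columns of T, P and Q respectively.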
            ly[0] = dot(s, y);
            transposeProduct(lx, X, s);
            T.column(h) << s;
            P.column(h) << lx;
            Q.column(h) << ly;
            // X = X - s lx'
            // y = y - s ly
            for (int i = 0; i < n; i++) {
                for (int j = 0; j < p; j++) {
                    X(i,j) -= s[i] * lx[j];
                }
                y[i] -= s[i] * ly[0];
            }
        }
        DBG_MODULE_LOG << "Computation of the corresponding coefficients" << endl;
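        // Step (9): W = (T P')^(+) T, where the right pseudoinverse of T P'
        // is computed from its SVD with near-zero singular values dropped.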
        Mat tmp(n, p);
        productTranspose(tmp, T, P);
        Mat U, Vt;
        Vec D;
        real safeguard = 1.1; // Because the SVD may crash otherwise.
        SVD(tmp, U, D, Vt, 'A', safeguard);
        for (int i = 0; i < D.length(); i++) {
            if (abs(D[i]) < precision) {
                D[i] = 0;
            } else {
                D[i] = 1.0 / D[i];
            }
        }
        Mat tmp2(n,p);
        tmp2.fill(0);
        for (int i = 0; i < D.length(); i++) {
            if (!fast_exact_is_equal(D[i], 0)) {
                tmp2(i) << D[i] * Vt(i);
            }
        }
        product(tmp, U, tmp2);
        transposeProduct(W, tmp, T);
    }
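
    // Common to both methods: B = W Q' (so that Y = X B on normalized data),
    // while the score of an input x is t = W' x.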
    B.resize(p,m);
    productTranspose(B, W, Q);

    // If confidence intervals were requested, compute the variance of the
    // residuals on the training set.
    if (compute_confidence)
        computeResidVariance(train_set, resid_variance);
    else
        resid_variance.resize(0);

    MODULE_LOG << "PLS training ended" << endl;
    stage = 1;
}


//#####  computeResidVariance  ################################################

void PLS::computeResidVariance(VMat dataset, Vec& resid_variance)
{
    PLASSERT( dataset.isNotNull() && m >= 0 );
    bool old_output_score  = output_the_score;
    bool old_output_target = output_the_target;
    output_the_score  = false;
    output_the_target = true;

    resid_variance.resize(m);
    resid_variance.fill(0.0);
    Vec input, target, output(m);
    real weight;
    for (int i=0, n=dataset.length() ; i<n ; ++i) {
        dataset->getExample(i, input, target, weight);
        computeOutput(input, output);
        target -= output;
        target *= target;                    // Square of residual
        resid_variance += target;
    }
    resid_variance /= (dataset.length() - inputsize());

    output_the_score  = old_output_score;
    output_the_target = old_output_target;
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :