// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 2003,2006 Olivier Delalleau

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: ConjGradientOptimizer.cc 8916 2008-04-30 14:12:24Z nouiz $
 * This file is part of the PLearn library.
 ******************************************************* */

#define PL_LOG_MODULE_NAME "ConjGradientOptimizer"

#include "ConjGradientOptimizer.h"
#include <plearn/io/pl_log.h>
#include <plearn/var/SumOfVariable.h>
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

// ConjGradientOptimizer //
ConjGradientOptimizer::ConjGradientOptimizer()
    : constrain_limit(0.1),
      expected_red(1),
      max_extrapolate(3),
      rho(1e-2),
      sigma(0.5),
      slope_ratio(100),
      max_eval_per_line_search(20),
      no_negative_gamma(true),
      verbosity(0),
      minibatch_n_samples(0),
      minibatch_n_line_searches(3),
      minibatch_curpos(0),
      line_search_failed(false),
      line_search_succeeded(false)
{ }

PLEARN_IMPLEMENT_OBJECT(
    ConjGradientOptimizer,
    "Optimizer based on the conjugate gradient method.",
    "The conjugate gradient algorithm is basically the following:\n"
    "- 0: initialize the search direction d = -gradient\n"
    "- 1: perform a line search along direction d for the minimum of the\n"
    "     function value\n"
    "- 2: move to this minimum, update the search direction d and go to\n"
    "     step 1\n"
    "The line search algorithm is inspired by Carl Edward Rasmussen's Matlab\n"
    "algorithm from:\n"
    "http://www.kyb.tuebingen.mpg.de/bs/people/carl/code/minimize/minimize.m\n"
    "\n"
    "Many options can be set; however, the provided default values should\n"
    "be adequate in most cases.\n"
    "\n"
    "A few comments reproduced from Rasmussen's code; we are very grateful for\n"
    "his publishing detailed code:\n"
    "\n"
    "- The Polak-Ribiere flavour of conjugate gradients is used to compute search\n"
    "  directions, and a line search using quadratic and cubic polynomial\n"
    "  approximations and the Wolfe-Powell stopping criteria is used together with\n"
    "  the slope ratio method for guessing initial step sizes. Additionally a bunch\n"
    "  of checks are made to make sure that exploration is taking place and that\n"
    "  extrapolation will not be unboundedly large.\n"
    "\n"
    "- The code falls naturally into 3 parts, after the initial line search is\n"
    "  started in the direction of steepest descent. 1) we first enter a while loop\n"
    "  which uses point 1 (p1) and point 2 (p2) to compute an extrapolation (p3),\n"
    "  until we have extrapolated far enough (Wolfe-Powell conditions). 2) if\n"
    "  necessary, we enter the second loop which takes p2, p3 and p4 and chooses the\n"
    "  subinterval containing a (local) minimum, and interpolates it, until an\n"
    "  acceptable point is found (Wolfe-Powell conditions). Note that points are\n"
    "  always maintained in order p0 <= p1 <= p2 < p3 < p4. 3) compute a new search\n"
    "  direction using conjugate gradients (Polak-Ribiere flavour), or revert to\n"
    "  steepest descent if there was a problem in the previous line-search. Return\n"
    "  the best value so far, if two consecutive line-searches fail, or whenever we\n"
    "  run out of function evaluations or line-searches. During extrapolation, the\n"
    "  \"f\" function may fail either with an error or returning NaN or Inf, and\n"
    "  minimize should handle this gracefully.\n");

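// A rough usage sketch in plain C++ (normally the optimizer is instantiated and
// configured through PLearn's option/serialization mechanism).  It assumes the
// interface inherited from Optimizer (in particular setToOptimize() and build());
// 'the_params', 'the_cost' and 'stats' are placeholders for objects built elsewhere:
//
//     PP<ConjGradientOptimizer> opt = new ConjGradientOptimizer();
//     opt->nstages = 100;                        // number of CG stages per call
//     opt->verbosity = 10;                       // print the cost every 10 stages
//     opt->setToOptimize(the_params, the_cost);  // parameters (VarArray) and cost (Var)
//     opt->build();
//     VecStatsCollector stats;
//     bool done = opt->optimizeN(stats);         // returns the 'early_stop' flag
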
// declareOptions //
void ConjGradientOptimizer::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "verbosity", &ConjGradientOptimizer::verbosity,
        OptionBase::buildoption,
        "Controls the amount of output.  If zero, does not print anything.\n"
        "If 'verbosity'=V, print the current cost if\n"
        "\n"
        "    stage % V == 0\n"
        "\n"
        "i.e. every V stages.  (Default=0)\n");

    declareOption(
        ol, "expected_red", &ConjGradientOptimizer::expected_red,
        OptionBase::buildoption,
        "Expected function reduction at first step.");

    declareOption(
        ol, "no_negative_gamma",
        &ConjGradientOptimizer::no_negative_gamma,
        OptionBase::buildoption,
        "If true, then a negative value for gamma in the Polak-Ribiere\n"
        "formula will trigger a restart.");

    declareOption(
        ol, "sigma", &ConjGradientOptimizer::sigma,
        OptionBase::buildoption,
        "Constant in the Wolfe-Powell stopping conditions.  It is the maximum allowed\n"
        "absolute ratio between previous and new slopes (derivatives in the search\n"
        "direction); thus setting sigma to low (positive) values forces higher\n"
        "precision in the line-searches.\n"
        "Tuning of sigma (depending on the nature of the function to be optimized)\n"
        "may speed up the minimization.");

    declareOption(
        ol, "rho", &ConjGradientOptimizer::rho,
        OptionBase::buildoption,
        "Constant in the Wolfe-Powell stopping conditions.\n"
        "Rho is the minimum allowed fraction of the expected reduction (computed from\n"
        "the slope at the initial point of the line search). Constants must satisfy\n"
        "0 < rho < sigma < 1.\n"
        "It is probably not worth playing much with rho.\n");

    declareOption(
        ol, "constrain_limit",
        &ConjGradientOptimizer::constrain_limit,
        OptionBase::buildoption,
        "Multiplicative coefficient to constrain the evaluation bracket.\n"
        "We don't re-evaluate the function if we are within 'constrain_limit'\n"
        "of the current bracket.");

    declareOption(
        ol, "max_extrapolate",
        &ConjGradientOptimizer::max_extrapolate,
        OptionBase::buildoption,
        "Maximum coefficient for bracket extrapolation.  This limits the\n"
        "extrapolation to be within 'max_extrapolate' times the current step-size.");

    declareOption(
        ol, "max_eval_per_line_search",
        &ConjGradientOptimizer::max_eval_per_line_search,
        OptionBase::buildoption,
        "Maximum number of function evaluations during line search.");

    declareOption(
        ol, "slope_ratio", &ConjGradientOptimizer::slope_ratio,
        OptionBase::buildoption,
        "Maximum slope ratio: it bounds the guess for the initial step size of the\n"
        "next line search relative to the previous one.");

    declareOption(
        ol, "minibatch_n_samples", &ConjGradientOptimizer::minibatch_n_samples,
        OptionBase::buildoption,
        "If >0 we'll do minibatch. In minibatch mode, weight updates are based on\n"
        "cost and gradients computed on a subset of the whole training set, made\n"
        "of minibatch_n_samples consecutive samples. Each such subset will be used\n"
        "to perform minibatch_n_line_searches line searches before moving to the\n"
        "next minibatch subset.\n");

    declareOption(
        ol, "minibatch_n_line_searches", &ConjGradientOptimizer::minibatch_n_line_searches,
        OptionBase::buildoption,
        "How many line searches to perform with each minibatch subset.");

    inherited::declareOptions(ol);
}


// build_ //
void ConjGradientOptimizer::build_() {
    // Make sure the internal data have the right size.
    int n = params.nelems();
    current_opp_gradient.resize(n);
    search_direction.resize(n);
    tmp_storage.resize(n);
    delta.resize(n);
}

// computeCostAndDerivative //
void ConjGradientOptimizer::computeCostAndDerivative(
    real alpha, real& cost, real& derivative) {
    if (fast_exact_is_equal(alpha, 0)) {
        cost = this->current_cost;
        derivative = -dot(this->search_direction, this->current_opp_gradient);
    } else {
        this->params.copyTo(this->tmp_storage);
        this->params.update(alpha, this->search_direction);
        computeGradient(this->delta);
        cost = this->cost->value[0];

#if 0
        Vec tmpparams(this->params.nelems());
        this->params >> tmpparams;
        perr << "Params: " << tmpparams << "   Cost: " << cost << endl;
#endif

        derivative = dot(this->search_direction, this->delta);
        this->params.copyFrom(this->tmp_storage);
    }
}
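
// In other words, computeCostAndDerivative() evaluates the one-dimensional
// restriction of the cost along the current search direction,
//     phi(alpha)  = cost(params + alpha * search_direction)
//     phi'(alpha) = search_direction . gradient(params + alpha * search_direction)
// reusing the cached cost and opposite gradient when alpha == 0, and restoring
// 'params' afterwards so the evaluation leaves the parameters unchanged.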

// computeCostValue //
real ConjGradientOptimizer::computeCostValue(real alpha)
{
    if (fast_exact_is_equal(alpha, 0))
        return this->current_cost;
    this->params.copyTo(this->tmp_storage);
    this->params.update(alpha, this->search_direction);
    this->proppath.fprop();
    real c = this->cost->value[0];

#if 0
    Vec tmpparams(this->params.nelems());
    this->params >> tmpparams;
    perr << "Params: " << tmpparams << "   Cost: " << c << endl;
#endif

    this->params.copyFrom(this->tmp_storage);
    return c;
}

// computeDerivative //
real ConjGradientOptimizer::computeDerivative(real alpha)
{
    if (fast_exact_is_equal(alpha, 0))
        return -dot(this->search_direction, this->current_opp_gradient);
    this->params.copyTo(this->tmp_storage);
    this->params.update(alpha, this->search_direction);
    computeGradient(this->delta);

#if 0
    Vec tmpparams(this->params.nelems());
    this->params >> tmpparams;
    perr << "Params: " << tmpparams << "   Cost: " << this->cost->value[0] << endl;
#endif

    this->params.copyFrom(this->tmp_storage);
    return dot(this->search_direction, this->delta);
}

// findDirection //
void ConjGradientOptimizer::findDirection() {
    real gamma = polakRibiere();
    if (gamma < 0 && no_negative_gamma) {
        if (verbosity > 0)
            MODULE_LOG << "gamma = " << gamma << " < 0 ==> Restarting" << endl;
        gamma = 0;
    }
    /*
    // Old code triggering restart.
    else {
        real dp = dot(delta, current_opp_gradient);
        real delta_n = pownorm(delta);
        if (abs(dp) > restart_coeff *delta_n ) {
            if (verbosity >= 5)
                pout << "Restart triggered !" << endl;
            gamma = 0;
        }
    }
    */
    updateSearchDirection(gamma);
}

// minimizeLineSearch //
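// The line search looks for a step 'step1' > 0 along 'search_direction' that
// satisfies the Wolfe-Powell conditions as they appear in the tests below
// (fun_deriv1 < 0 since the search direction is a descent direction):
//     fun_val2 <= fun_val1 + step1 * rho * fun_deriv1      (sufficient decrease)
//     |fun_deriv2| <= -sigma * fun_deriv1                  (slope condition)
// Candidate steps come from quadratic/cubic fits to the bracketed points,
// falling back to bisection or to bounded extrapolation when the fitted step
// is unusable (NaN, Inf, wrong sign, or outside the allowed range).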
real ConjGradientOptimizer::minimizeLineSearch()
{
    // We may need to perform two iterations of line search if the first one
    // fails.
    bool try_again = true;
    while (try_again) {
        try_again = false;
        real fun_val0 = fun_val1;
        computeCostAndDerivative(step1, fun_val2, fun_deriv2);
        real fun_val3 = fun_val1;
        real fun_deriv3 = fun_deriv1;
        real step3 = - step1;
        fun_eval_count = max_eval_per_line_search;
        line_search_succeeded = false;
        bracket_limit = -1;
        while (true) {
            while ( (fun_val2 > fun_val1 + step1 * rho * fun_deriv1 ||
                     fun_deriv2 > - sigma * fun_deriv1 ) &&
                    fun_eval_count > 0 )
            {
                // Tighten bracket.
                bracket_limit = step1;
                if (fun_val2 > fun_val1) {
                    // Quadratic fit.
                    step2 = step3 -
                        (0.5*fun_deriv3*step3*step3) /
                        (fun_deriv3*step3+fun_val2-fun_val3);
                } else {
                    // Cubic fit.
                    cubic_a = 6*(fun_val2-fun_val3)/step3 +
                              3*(fun_deriv2+fun_deriv3);
                    cubic_b = 3*(fun_val3-fun_val2) -
                              step3*(fun_deriv3+2*fun_deriv2);
                    step2 =
                        (sqrt(cubic_b*cubic_b-cubic_a*fun_deriv2*step3*step3) -
                         cubic_b) / cubic_a;
                }
                if (isnan(step2) || isinf(step2))
                    // Shit happens => bisection.
                    step2 = step3/2;
                // Constrained range.
                step2 = max(min(step2, constrain_limit*step3),
                            (1-constrain_limit)*step3);
                // Increase step and update function value and derivative.
                step1 += step2;
                computeCostAndDerivative(step1, fun_val2, fun_deriv2);
                // Update point 3.
                step3 = step3 - step2;
                fun_eval_count--;
            }
            if (fun_val2 > fun_val1+step1*rho*fun_deriv1 ||
                fun_deriv2 > -sigma*fun_deriv1)
                // Failure.
                break;
            else if (fun_deriv2 > sigma * fun_deriv1) {
                // Success.
                line_search_succeeded = true;
                break;
            } else if (fun_eval_count == 0)
                // Failure.
                break;
            // Cubic fit.
            cubic_a = 6*(fun_val2-fun_val3)/step3+3*(fun_deriv2+fun_deriv3);
            cubic_b = 3*(fun_val3-fun_val2)-step3*(fun_deriv3+2*fun_deriv2);
            step2 = -fun_deriv2*step3*step3 /
                (cubic_b +
                 sqrt(cubic_b*cubic_b-cubic_a*fun_deriv2*step3*step3));
            if (isnan(step2) || isinf(step2) || step2 < 0) {
                // Numerical issue, or wrong sign.
                if (bracket_limit < -0.5)
                    // No upper limit.
                    step2 = step1 * (max_extrapolate - 1);
                else
                    step2 = (bracket_limit - step1) / 2;
            } else if (bracket_limit > -0.5 && (step2 + step1 > bracket_limit))
                // Extrapolation beyond maximum.
                step2 = (bracket_limit - step1) / 2;
            else if (bracket_limit < -0.5 &&
                     step2+step1 > step1 * max_extrapolate) {
                // Extrapolation beyond limit.
                step2 = step1 * (max_extrapolate - 1);
            } else if (step2 < - step3 * constrain_limit) {
                step2 = - step3 * constrain_limit;
                // Too close to limit?
            } else if (bracket_limit > -0.5 &&
                       step2 < (bracket_limit - step1) * (1 - constrain_limit))
                // Too close to limit.
                step2 = (bracket_limit - step1) * (1 - constrain_limit);
            // Point 3 = point 2.
            fun_val3 = fun_val2;
            fun_deriv3 = fun_deriv2;
            step3 = - step2;
            // Update step and function value and derivative.
            step1 += step2;
            computeCostAndDerivative(step1, fun_val2, fun_deriv2);
            fun_eval_count--;
        }

        if (line_search_succeeded) {
            fun_val1 = fun_val2;
            line_search_failed = false;
        } else {
            // Come back to initial point.
            fun_val1 = fun_val0;
            // If it is the second time it fails, then we cannot do better.
            if (line_search_failed)
                return 0;
            // Original code:
            // tmp = df1; df1 = df2; df2 = tmp; % swap derivatives
            // s = -df1; % try steepest
            // d1 = -s'*s;
            // We do not do that... it looks weird!
            // We will actually do s = -df0 as this seems more logical.
            // TODO See Carl Rasmussen's answer to email...
            fun_deriv1 = - pownorm(current_opp_gradient);
            step1 = 1 / (1 - fun_deriv1);
            line_search_failed = true;
            try_again = true;
        }
    }
    return step1;
}

// lineSearch //
bool ConjGradientOptimizer::lineSearch() {
    real step = minimizeLineSearch();
    if (step < 0)
        // Hopefully this will not happen.
        PLWARNING("Negative step!");
    bool no_improvement_possible = fast_exact_is_equal(step, 0);
    if (no_improvement_possible) {
        if (verbosity > 0)
            MODULE_LOG << "No more progress made by the line search, stopping" << endl;
    } else
        params.update(step, search_direction);
    return !no_improvement_possible;
}

// makeDeepCopyFromShallowCopy //
void ConjGradientOptimizer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(current_opp_gradient, copies);
    deepCopyField(search_direction, copies);
    deepCopyField(tmp_storage, copies);
    deepCopyField(delta, copies);
}

// optimizeN //
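// One stage corresponds to one line search.  Starting from the steepest-descent
// direction at stage 0, each stage (i) minimizes the cost along
// 'search_direction' with lineSearch(), (ii) recomputes the opposite gradient
// into 'delta' at the new point, and (iii) derives the next search direction
// with findDirection().  In minibatch mode, the cost (a SumOfVariable) is
// restricted to 'minibatch_n_samples' consecutive samples, and the window is
// advanced (wrapping around the training set) after 'minibatch_n_line_searches'
// line searches:
//     minibatch_curpos = (minibatch_curpos + minibatch_n_samples) % trainsetlength;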
bool ConjGradientOptimizer::optimizeN(VecStatsCollector& stats_coll) {
    int stage_max = stage + nstages; // The stage to reach.

    SumOfVariable* sumofvar = 0;
    int trainsetlength = -1;
    int minibatch_n_line_searches_left = minibatch_n_line_searches;
    if(minibatch_n_samples>0)
    {
        sumofvar = dynamic_cast<SumOfVariable*>((Variable*)cost);
        if(sumofvar)
        {
            trainsetlength = sumofvar->getDataSet()->length();
            sumofvar->setSampleRange(minibatch_curpos, minibatch_n_samples, true);
        }
        else
        {
            PLWARNING("In ConjGradientOptimizer, minibatch_n_samples>0 but can't "
                      "do minibatch since cost does not seem to be a SumOfVariable "
                      "(the only type of variable for which minibatch is supported)");
        }
    }

    if (stage == 0)
    {
        computeOppositeGradient(current_opp_gradient);
        // First search direction = - gradient.
        search_direction << current_opp_gradient;
        current_cost = cost->value[0];

        fun_val1 = current_cost;
        fun_deriv1 = - pownorm(search_direction);
        step1 = expected_red / ( 1 - fun_deriv1 );
    }

    if (early_stop) {
        // The 'early_stop' flag is already set: we must still update the stats
        // collector with the current cost value.
        this->proppath.fprop();
        stats_coll.update(cost->value);
    }

    for (; !early_stop && stage<stage_max; stage++) {

        if(sumofvar && minibatch_n_line_searches_left==0)
        {
            minibatch_curpos = (minibatch_curpos+minibatch_n_samples)%trainsetlength;
            sumofvar->setSampleRange(minibatch_curpos, minibatch_n_samples, true);
            minibatch_n_line_searches_left = minibatch_n_line_searches;
        }

        // Make a line search along the current search direction.
        early_stop = !lineSearch();
        if(sumofvar) // we're doing minibatch
            --minibatch_n_line_searches_left;

        // Ensure 'delta' contains the opposite gradient at the new point
        // reached after the line search.
        // Also update 'current_cost'.
        computeOppositeGradient(delta);
        current_cost = cost->value[0];
        // Display current cost value if required.
        if (verbosity > 0 && stage % verbosity == 0)
            MODULE_LOG << "Stage " << stage << ": "
                       << current_cost
                       << endl;
        stats_coll.update(cost->value);

        // Find the new search direction if we need to continue.
        if (!early_stop)
            findDirection();
    }

    if (early_stop && verbosity > 0)
        MODULE_LOG << "Early stopping at stage " << stage
                   << "; current-cost=" << current_cost
                   << endl;

    return early_stop;
}

// polakRibiere //
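// Polak-Ribiere coefficient.  With g_old and g_new the gradients at the
// previous and new points (the code stores their opposites in
// 'current_opp_gradient' and 'delta', so the signs cancel), this computes
//     gamma = (g_new - g_old) . g_new / ||g_old||^2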
real ConjGradientOptimizer::polakRibiere()
{
    real normg = pownorm(this->current_opp_gradient);
    // At this point, delta = opposite gradient at new point.
    this->tmp_storage << this->delta;
    this->tmp_storage -= this->current_opp_gradient;
    return dot(this->tmp_storage, this->delta) / normg;
}

// reset //
void ConjGradientOptimizer::reset() {
    inherited::reset();
    line_search_failed = false;
    line_search_succeeded = false;
    minibatch_curpos = 0;
}

// updateSearchDirection //
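// Standard conjugate-gradient direction update, written below with the
// opposite gradient 'delta' = -g_new:
//     d_new = -g_new + gamma * d_old
// If d_new is not a descent direction (i.e. the new slope fun_deriv2 is
// positive), the direction is reset to steepest descent.  The initial step of
// the next line search is then guessed from the ratio of the previous and new
// slopes, capped by 'slope_ratio':
//     step1 = step1 * min(slope_ratio, fun_deriv1 / (fun_deriv2 - REAL_EPSILON))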
void ConjGradientOptimizer::updateSearchDirection(real gamma) {
    if (fast_exact_is_equal(gamma, 0))
        search_direction << delta;
    else
        for (int i=0; i<search_direction.length(); i++)
            search_direction[i] = delta[i] + gamma * search_direction[i];

    // Update 'current_opp_gradient' for the new current point.
    current_opp_gradient << delta;
    fun_deriv2 = - dot(current_opp_gradient, search_direction);
    if (fun_deriv2 > 0) {
        search_direction << current_opp_gradient;
        fun_deriv2 = - pownorm(search_direction);
    }
    step1 = step1 * min(slope_ratio, fun_deriv1/(fun_deriv2-REAL_EPSILON));
    fun_deriv1 = fun_deriv2;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :