00001 // -*- C++ -*-
00002 
00003 // PvGradNNet.cc
00004 //
00005 // Copyright (C) 2007 PA M, Pascal V
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: P AM, Pascal V
00036 
00039 #include "PvGradNNet.h"
00040 #include <plearn/math/pl_erf.h>
00041 
00042 namespace PLearn {
00043 using namespace std;
00044 
00045 PLEARN_IMPLEMENT_OBJECT(
00046     PvGradNNet,
00047     "Multi-layer neural network for experimenting with Pascal V's gradient idea.",
00048     "See the twiki's SRPropGradient entry.\n"
00049     );
00050 
00051 PvGradNNet::PvGradNNet()
00052     : mNNet(),
00053       pv_initial_stepsize(1e-1),
00054       pv_min_stepsize(1e-6),
00055       pv_max_stepsize(50.0),
00056       pv_acceleration(1.2),
00057       pv_deceleration(0.5),
00058       pv_min_samples(2),
00059       pv_required_confidence(0.80),
00060       pv_conf_ct(0.0),
00061       pv_strategy(1),
00062       pv_random_sample_step(false),
00063       pv_self_discount(0.5),
00064       pv_other_discount(0.95),
00065       pv_within_neuron_discount(0.95),
00066       n_updates(0),
00067       limit_ratio(0.0),
00068       n_small_ratios(0.0)
00069 {
00070     random_gen = new PRandom();
00071 }
00072 
00073 void PvGradNNet::declareOptions(OptionList& ol)
00074 {
00075     declareOption(ol, "pv_initial_stepsize",
00076                   &PvGradNNet::pv_initial_stepsize,
00077                   OptionBase::buildoption,
00078                   "Initial size of steps in parameter space");
00079 
00080     declareOption(ol, "pv_min_stepsize",
00081                   &PvGradNNet::pv_min_stepsize,
00082                   OptionBase::buildoption,
00083                   "Minimal size of steps in parameter space");
00084 
00085     declareOption(ol, "pv_max_stepsize",
00086                   &PvGradNNet::pv_max_stepsize,
00087                   OptionBase::buildoption,
00088                   "Maximal size of steps in parameter space");
00089 
00090     declareOption(ol, "pv_acceleration",
00091                   &PvGradNNet::pv_acceleration,
00092                   OptionBase::buildoption,
00093                   "Coefficient by which to multiply the step size when consecutive steps have the same sign.");
00094 
00095     declareOption(ol, "pv_deceleration",
00096                   &PvGradNNet::pv_deceleration,
00097                   OptionBase::buildoption,
00098                   "Coefficient by which to multiply the step size when consecutive steps have opposite signs.");
00099 
00100     declareOption(ol, "pv_min_samples",
00101                   &PvGradNNet::pv_min_samples,
00102                   OptionBase::buildoption,
00103                   "PV's minimum number of samples to estimate gradient sign.\n"
00104                   "This should be at least 2.");
00105 
00106     declareOption(ol, "pv_required_confidence",
00107                   &PvGradNNet::pv_required_confidence,
00108                   OptionBase::buildoption,
00109                   "Minimum required confidence (probability of being positive or negative) for taking a step.");
00110 
00111     declareOption(ol, "pv_conf_ct",
00112                   &PvGradNNet::pv_conf_ct,
00113                   OptionBase::buildoption,
00114                   "Used for confidence adaptation.");
00115 
00116     declareOption(ol, "pv_strategy",
00117                   &PvGradNNet::pv_strategy,
00118                   OptionBase::buildoption,
00119                   "Strategy to use for the weight updates (number from 1 to 5).");
00120 
00121     declareOption(ol, "pv_random_sample_step",
00122                   &PvGradNNet::pv_random_sample_step,
00123                   OptionBase::buildoption,
00124                   "If this is set to true, then we will randomly choose the step sign\n"
00125                   "for each parameter based on the estimated probability of it being\n"
00126                   "positive or negative.");
00127 
00128     declareOption(ol, "pv_self_discount",
00129                   &PvGradNNet::pv_self_discount,
00130                   OptionBase::buildoption,
00131                   "Discount used to perform soft invalidation of a weight's statistics\n"
00132                   "after its update.");
00133 
00134     declareOption(ol, "pv_other_discount",
00135                   &PvGradNNet::pv_other_discount,
00136                   OptionBase::buildoption,
00137                   "Discount used to perform soft invalidation of other weights'\n" 
00138                   "statistics after a weight update.");
00139 
00140     declareOption(ol, "pv_within_neuron_discount",
00141                   &PvGradNNet::pv_within_neuron_discount,
00142                   OptionBase::buildoption,
00143                   "Discount used to perform soft invalidation of other weights'\n"
00144                   "(same neuron) statistics after a weight update.");
00145 
00146     // Now call the parent class' declareOptions
00147     inherited::declareOptions(ol);
00148 }
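// Configuration sketch (illustrative only, not part of the library): the
// options above are ordinary build options, so a learner could be set up
// programmatically along these lines, assuming the usual PLearn PP<> smart
// pointer, that these build options are publicly accessible members (as is
// customary for PLearn build options), and that the inherited mNNet
// architecture options (layer sizes, etc.) are configured separately:
//
//     PP<PvGradNNet> net = new PvGradNNet();
//     net->pv_initial_stepsize = 0.1;     // starting per-parameter step size
//     net->pv_required_confidence = 0.80; // confidence needed on the gradient sign
//     net->pv_strategy = 2;               // use the discountGrad() update
//     net->build();                       // resizes the pv_all_* statistics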
00149 
00150 // TODO - reloading an object will not work! layer_params will just get lost.
00151 void PvGradNNet::build_()
00152 {
00153     int n = all_params.length();
00154     pv_all_nsamples.resize(n);
00155     pv_all_sum.resize(n);
00156     pv_all_sumsquare.resize(n);
00157     pv_all_stepsigns.resize(n);
00158     pv_all_stepsizes.resize(n);
00159 
00160     // Build per-layer matrix views onto the flat Vecs above
00161     pv_layer_nsamples.resize(n_layers-1);
00162     pv_layer_sum.resize(n_layers-1);
00163     pv_layer_sumsquare.resize(n_layers-1);
00164     pv_layer_stepsigns.resize(n_layers-1);
00165     pv_layer_stepsizes.resize(n_layers-1);
00166     int np;
00167     int n_neurons=0;
00168     for (int i=0,p=0;i<n_layers-1;i++)  {
00169         np=layer_sizes[i+1]*(1+layer_sizes[i]);
00170         pv_layer_nsamples[i]=pv_all_nsamples.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00171         pv_layer_sum[i]=pv_all_sum.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00172         pv_layer_sumsquare[i]=pv_all_sumsquare.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00173         pv_layer_stepsigns[i]=pv_all_stepsigns.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00174         pv_layer_stepsizes[i]=pv_all_stepsizes.subVec(p,np).toMat(layer_sizes[i+1],layer_sizes[i]+1);
00175         p+=np;
00176         n_neurons+=layer_sizes[i+1];
00177     }
00178     n_neuron_updates.resize(n_neurons);
00179 
00180 }
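// Layout sketch (hypothetical sizes, for illustration): with
// layer_sizes = [3, 4, 2], the flat vectors above are carved into one matrix
// view per connection layer of shape layer_sizes[i+1] x (layer_sizes[i]+1),
// the extra column holding the bias:
//
//     layer 0 -> 1 : 4 x (3+1) = 16 entries, starting at offset p = 0
//     layer 1 -> 2 : 2 x (4+1) = 10 entries, starting at offset p = 16
//
// for a total of 26 entries, which must equal all_params.length(), and
// n_neurons = 4 + 2 = 6 entries in n_neuron_updates.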
00181 
00182 // ### Nothing to add here, simply calls build_
00183 void PvGradNNet::build()
00184 {
00185     inherited::build();
00186     build_();
00187 }
00188 
00189 
00190 void PvGradNNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00191 {
00192     inherited::makeDeepCopyFromShallowCopy(copies);
00193 
00194     deepCopyField(pv_all_nsamples, copies); 
00195     deepCopyField(pv_layer_nsamples, copies);
00196     deepCopyField(pv_all_sum, copies); 
00197     deepCopyField(pv_layer_sum, copies);
00198     deepCopyField(pv_all_sumsquare, copies);
00199     deepCopyField(pv_layer_sumsquare, copies);
00200     deepCopyField(pv_all_stepsigns, copies);
00201     deepCopyField(pv_layer_stepsigns, copies);
00202     deepCopyField(pv_all_stepsizes, copies);
00203     deepCopyField(pv_layer_stepsizes, copies);
00204     deepCopyField(n_neuron_updates, copies);
00205 //    deepCopyField(pv_gradstats, copies);
00206 }
00207 
00208 void PvGradNNet::forget()
00209 {
00213     inherited::forget();
00214 
00215     pv_all_nsamples.fill(0);
00216     pv_all_sum.fill(0.0);
00217     pv_all_sumsquare.fill(0.0);
00218     pv_all_stepsigns.fill(0);
00219     pv_all_stepsizes.fill(pv_initial_stepsize);
00220 
00221     // used in the discountGrad() strategy
00222     n_updates = 0; 
00223     n_small_ratios=0.0;
00224     n_neuron_updates.fill(0);    
00225 //    pv_gradstats->forget();
00226 
00227     limit_ratio = gauss_01_quantile(pv_required_confidence);
00228 }
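// Worked note on limit_ratio: with the default pv_required_confidence of
// 0.80, gauss_01_quantile(0.80) is about 0.84, so a parameter is only moved
// once the magnitude of its mean gradient exceeds roughly 0.84 standard
// errors, i.e. once the gradient sign is positive (or negative) with
// estimated probability at least 0.80 under a Gaussian approximation of the
// sample mean.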
00229 
00231 void PvGradNNet::bpropUpdateNet(int t)
00232 {
00233     bpropNet(t);
00234 
00235     switch( pv_strategy )   {
00236         case 1 :
00237             pvGrad();
00238             break;
00239         case 2 :
00240             discountGrad();
00241             break;
00242         case 3 :
00243             neuronDiscountGrad();
00244             break;
00245         case 4 :
00246             globalSyncGrad();
00247             break;
00248         case 5 :
00249             neuronSyncGrad();
00250             break;
00251         default :
00252             PLERROR("PvGradNNet::bpropUpdateNet() - No such pv_strategy.");
00253     }
00254     // hack
00255     if( (t%160000)==0 )  {
00256         cout << n_small_ratios << " small ratios." << endl;
00257         n_small_ratios = 0.0;
00258     }
00259         
00260 }
00261 
00262 void PvGradNNet::pvGrad()  
00263 {
00264     int np = all_params.length();
00265     real m, e;//, prob_pos, prob_neg;
00266     //real ratio;
00267     // move this stuff to a train function to avoid repeated computations
00268     if( pv_conf_ct != 0.0 ) {     
00269         real conf = pv_required_confidence; 
00270         conf += (1.0-pv_required_confidence) * (stage/(stage+pv_conf_ct));
00271         limit_ratio = gauss_01_quantile(conf);
00272     }
00273 
00274     for(int k=0; k<np; k++) {
00275         // update stats
00276         pv_all_nsamples[k]++;
00277         pv_all_sum[k] += all_params_gradient[k];
00278         pv_all_sumsquare[k] += all_params_gradient[k] * all_params_gradient[k];
00279 
00280         if(pv_all_nsamples[k]>pv_min_samples)   {
00281             real inv_pv_all_nsamples_k = 1./pv_all_nsamples[k];
00282             real pv_all_sum_k = pv_all_sum[k];
00283             m = pv_all_sum_k * inv_pv_all_nsamples_k;
00284             // e is the standard error
00285             // variance
00286             //e = real((pv_all_sumsquare[k] - square(pv_all_sum[k])/pv_all_nsamples[k])/(pv_all_nsamples[k]-1));
00287             // standard error 
00288             //e = sqrt(e*inv_pv_all_nsamples_k);
00289             // This is an approximation where we've replaced (nsamples-1) by nsamples
00290             e = sqrt(pv_all_sumsquare[k]-pv_all_sum_k*m)*inv_pv_all_nsamples_k;
00291 
00292             // test to see if numerical problems
00293             if( fabs(m) < 1e-15 || e < 1e-15 )  {
00294                 //cout << "PvGradNNet::bpropUpdateNet() - small mean-error ratio." << endl;
00295                 n_small_ratios++;
00296                 continue;
00297             }
00298 
00299             // TODO - for current treatment, not necessary to compute actual prob.
00300             // Comparing the ratio would be sufficient.
00301             //prob_pos = gauss_01_cum(m/e);
00302             //prob_neg = 1.-prob_pos;
00303             //ratio = m/e;
00304 
00305             if(!pv_random_sample_step)  {
00306                 real threshold = limit_ratio*e;
00307                 // We adapt the stepsize before taking the step
00308                 // gradient is positive
00309                 //if(prob_pos>=pv_required_confidence)    {
00310                 //if(ratio>=limit_ratio)  {
00311                 if(m>=threshold)  {
00312                     //pv_all_stepsizes[k] *= (pv_all_stepsigns[k]?pv_acceleration:pv_deceleration);
00313                     if(pv_all_stepsigns[k]>0)   {
00314                         pv_all_stepsizes[k]*=pv_acceleration;
00315                         if( pv_all_stepsizes[k] > pv_max_stepsize )
00316                             pv_all_stepsizes[k] = pv_max_stepsize;
00317                     }
00318                     else if(pv_all_stepsigns[k]<0)  {
00319                         pv_all_stepsizes[k]*=pv_deceleration;
00320                         if( pv_all_stepsizes[k] < pv_min_stepsize )
00321                             pv_all_stepsizes[k] = pv_min_stepsize;
00322                     }
00323                     all_params[k] -= pv_all_stepsizes[k];
00324                     pv_all_stepsigns[k] = 1;
00325                     pv_all_nsamples[k]=0;
00326                     pv_all_sum[k]=0.0;
00327                     pv_all_sumsquare[k]=0.0;
00328                 }
00329                 // gradient is negative
00330                 //else if(prob_neg>=pv_required_confidence)   {
00331                 else if(m<=-threshold) {
00332                     //pv_all_stepsizes[k] *= ((!pv_all_stepsigns[k])?pv_acceleration:pv_deceleration);
00333                     if(pv_all_stepsigns[k]<0)   {
00334                         pv_all_stepsizes[k]*=pv_acceleration;
00335                         if( pv_all_stepsizes[k] > pv_max_stepsize )
00336                             pv_all_stepsizes[k] = pv_max_stepsize;
00337                     }
00338                     else if(pv_all_stepsigns[k]>0)  {
00339                         pv_all_stepsizes[k]*=pv_deceleration;
00340                         if( pv_all_stepsizes[k] < pv_min_stepsize )
00341                             pv_all_stepsizes[k] = pv_min_stepsize;
00342                     }
00343                     all_params[k] += pv_all_stepsizes[k];
00344                     pv_all_stepsigns[k] = -1;
00345                     pv_all_nsamples[k]=0;
00346                     pv_all_sum[k]=0.0;
00347                     pv_all_sumsquare[k]=0.0;
00348                 }
00349             }
00350             /*else  // random sample update direction (sign)
00351             {
00352                 bool ispos = (random_gen->binomial_sample(prob_pos)>0);
00353                 if(ispos) // picked positive
00354                     all_params[k] += pv_all_stepsizes[k];
00355                 else  // picked negative
00356                     all_params[k] -= pv_all_stepsizes[k];
00357                 pv_all_stepsizes[k] *= (pv_all_stepsigns[k]==ispos)?pv_acceleration :pv_deceleration;
00358                 pv_all_stepsigns[k] = ispos;
00359                 st.forget();
00360             }*/
00361         }
00362         //pv_all_nsamples[k] = ns; // *stat*
00363     }
00364 }
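// Worked example of the test above (hypothetical numbers): suppose a
// parameter has accumulated pv_all_nsamples[k] = 5, pv_all_sum[k] = 0.50 and
// pv_all_sumsquare[k] = 0.06.  Then
//
//     m = 0.50 / 5 = 0.10
//     e = sqrt(0.06 - 0.50 * 0.10) / 5 = sqrt(0.01) / 5 = 0.02
//
// With limit_ratio ~ 0.84 the threshold is 0.84 * 0.02 ~ 0.017, so m exceeds
// it: the gradient is confidently positive, the parameter takes a step of
// -pv_all_stepsizes[k], the step size is multiplied by pv_acceleration if the
// previous step was also positive (by pv_deceleration if it was negative),
// and the statistics for that parameter are reset to zero.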
00365 
00372 void PvGradNNet::discountGrad()
00373 {
00374     int np = all_params.length();
00375     real m, e;//, prob_pos, prob_neg;
00376     int stepsign;
00377 
00378     // TODO bring the confidence treatment up to date (see pvGrad)
00379     real ratio;
00380     real conf = pv_required_confidence;
00381     if( pv_conf_ct != 0.0 ) {
00382         conf += (1.0-pv_required_confidence) * (stage/(stage+pv_conf_ct));
00383     }
00384     real limit_ratio = gauss_01_quantile(conf);
00385 
00386     // 
00387     real discount = pow(pv_other_discount,n_updates);
00388     n_updates = 0;
00389     if( discount < 0.001 )
00390         PLWARNING("PvGradNNet::discountGrad() - discount < 0.001 - that seems small...");
00391     real sd = pv_self_discount / pv_other_discount; // trick: apply this self discount
00392                                                     // and then discount
00393                                                     // everyone the same
00394     for(int k=0; k<np; k++) {
00395         // Perform soft invalidation
00396         pv_all_nsamples[k] *= discount;
00397         pv_all_sum[k] *= discount;
00398         pv_all_sumsquare[k] *= discount;
00399 
00400         // update stats
00401         pv_all_nsamples[k]++;
00402         pv_all_sum[k] += all_params_gradient[k];
00403         pv_all_sumsquare[k] += all_params_gradient[k] * all_params_gradient[k];
00404 
00405         if(pv_all_nsamples[k]>pv_min_samples)   {
00406             m = pv_all_sum[k] / pv_all_nsamples[k];
00407             e = real((pv_all_sumsquare[k] - square(pv_all_sum[k])/pv_all_nsamples[k])/(pv_all_nsamples[k]-1));
00408             e = sqrt(e/pv_all_nsamples[k]);
00409 
00410             // test to see if numerical problems
00411             if( fabs(m) < 1e-15 || e < 1e-15 )  {
00412                 //cout << "PvGradNNet::bpropUpdateNet() - small mean-error ratio." << endl;
00413                 n_small_ratios++;
00414                 continue;
00415             }
00416 
00417             // TODO - for current treatment, not necessary to compute actual
00418             // prob. Comparing the ratio would be sufficient.
00419             /*prob_pos = gauss_01_cum(m/e);
00420             prob_neg = 1.-prob_pos;
00421             if(prob_pos>=pv_required_confidence)
00422                 stepsign = 1;
00423             else if(prob_neg>=pv_required_confidence)
00424                 stepsign = -1;
00425             else
00426                 continue;*/
00427             ratio=m/e;
00428             if(ratio>=limit_ratio)
00429                 stepsign = 1;
00430             else if(ratio<=-limit_ratio)
00431                 stepsign = -1;
00432             else
00433                 continue;
00434 
00435             // consecutive steps of same sign, accelerate
00436             if( stepsign*pv_all_stepsigns[k]>0 )  {
00437                 pv_all_stepsizes[k]*=pv_acceleration;
00438                 if( pv_all_stepsizes[k] > pv_max_stepsize )
00439                     pv_all_stepsizes[k] = pv_max_stepsize;            
00440             // else if different signs decelerate
00441             }   else if( stepsign*pv_all_stepsigns[k]<0 )   {
00442                 pv_all_stepsizes[k]*=pv_deceleration;
00443                 if( pv_all_stepsizes[k] < pv_min_stepsize )
00444                     pv_all_stepsizes[k] = pv_min_stepsize;
00445             // else: previous sign was undetermined, keep the step size
00446             }//   else    {
00447             //}
00448             // step
00449             if( stepsign > 0 )
00450                 all_params[k] -= pv_all_stepsizes[k];
00451             else
00452                 all_params[k] += pv_all_stepsizes[k];
00453             pv_all_stepsigns[k] = stepsign;
00454             // soft invalidation of self
00455             pv_all_nsamples[k]*=sd;
00456             pv_all_sum[k]*=sd;
00457             pv_all_sumsquare[k]*=sd;
00458             n_updates++;
00459 
00460         }
00461     }
00462 }
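// Numerical sketch of the soft invalidation (default option values assumed):
// if the previous call made n_updates = 10 weight updates, every weight's
// statistics are first scaled by discount = 0.95^10 ~ 0.60.  A weight that
// steps during this call is additionally scaled by
// sd = pv_self_discount / pv_other_discount = 0.5 / 0.95 ~ 0.53, so that
// after the next call's uniform discount its statistics have effectively been
// scaled by pv_self_discount once (for its own update) and by
// pv_other_discount once per update of the other weights.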
00463 
00466 void PvGradNNet::neuronDiscountGrad()
00467 {
00468     real m, e;//, prob_pos, prob_neg;
00469     int stepsign;
00470 
00471     // TODO bring the confidence treatment up to date (see pvGrad)
00472     real ratio;
00473     real conf = pv_required_confidence;
00474     if( pv_conf_ct != 0.0 ) {
00475         conf += (1.0-pv_required_confidence) * (stage/(stage+pv_conf_ct));
00476     }
00477     real limit_ratio = gauss_01_quantile(conf);
00478 
00479     //
00480     real discount = pow(pv_other_discount,n_updates);
00481     real d;
00482     n_updates = 0;
00483     if( discount < 0.001 )
00484         PLWARNING("PvGradNNet::neuronDiscountGrad() - discount < 0.001 - that seems small...");
00485     real sd = pv_self_discount / pv_other_discount; // trick: apply this self discount
00486                                                     // and then discount
00487                                                     // everyone the same
00488     sd /= pv_within_neuron_discount;
00489 
00490     // k is an index on all the parameters.
00491     // kk is an index on all neurons.
00492     for(int l=0,k=0,kk=0; l<n_layers-1; l++)    {
00493         for(int n=0; n<layer_sizes[l+1]; n++,kk++)   {
00494             d = discount * pow(pv_within_neuron_discount,n_neuron_updates[kk]);
00495             n_neuron_updates[kk]=0;
00496             for(int w=0; w<1+layer_sizes[l]; w++,k++)   {
00497 
00498                 // Perform soft invalidation
00499                 pv_all_nsamples[k] *= d;
00500                 pv_all_sum[k] *= d;
00501                 pv_all_sumsquare[k] *= d;
00502 
00503                 // update stats
00504                 pv_all_nsamples[k]++;
00505                 pv_all_sum[k] += all_params_gradient[k];
00506                 pv_all_sumsquare[k] += all_params_gradient[k] * all_params_gradient[k];
00507 
00508                 if(pv_all_nsamples[k]>pv_min_samples)   {
00509                     m = pv_all_sum[k] / pv_all_nsamples[k];
00510                     e = real((pv_all_sumsquare[k] - square(pv_all_sum[k])/pv_all_nsamples[k])/(pv_all_nsamples[k]-1));
00511                     e = sqrt(e/pv_all_nsamples[k]);
00512 
00513                     // test to see if numerical problems
00514                     if( fabs(m) < 1e-15 || e < 1e-15 )  {
00515                         cout << "PvGradNNet::neuronDiscountGrad() - small mean-error ratio." << endl;
00516                         continue;
00517                     }
00518 
00519                     ratio=m/e;
00520                     if(ratio>=limit_ratio)
00521                         stepsign = 1;
00522                     else if(ratio<=-limit_ratio)
00523                         stepsign = -1;
00524                     else
00525                         continue;
00526 
00527                     // consecutive steps of same sign, accelerate
00528                     if( stepsign*pv_all_stepsigns[k]>0 )  {
00529                         pv_all_stepsizes[k]*=pv_acceleration;
00530                         if( pv_all_stepsizes[k] > pv_max_stepsize )
00531                             pv_all_stepsizes[k] = pv_max_stepsize;
00532                     // else if different signs decelerate
00533                     }   else if( stepsign*pv_all_stepsigns[k]<0 )   {
00534                         pv_all_stepsizes[k]*=pv_deceleration;
00535                         if( pv_all_stepsizes[k] < pv_min_stepsize )
00536                             pv_all_stepsizes[k] = pv_min_stepsize;
00537                     // else: previous sign was undetermined, keep the step size
00538                     }//   else    {
00539                     //}
00540                     // step
00541                     if( stepsign > 0 )
00542                         all_params[k] -= pv_all_stepsizes[k];
00543                     else
00544                         all_params[k] += pv_all_stepsizes[k];
00545                     pv_all_stepsigns[k] = stepsign;
00546                     // soft invalidation of self
00547                     pv_all_nsamples[k]*=sd;
00548                     pv_all_sum[k]*=sd;
00549                     pv_all_sumsquare[k]*=sd;
00550                     n_updates++;
00551                     n_neuron_updates[kk]++;
00552                 }
00553             }
00554         }
00555     }
00556 
00557 }
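// Per-neuron variant of the discount above (default option values assumed):
// a weight whose neuron received n_neuron_updates[kk] = 3 weight updates
// since the last call has its statistics scaled by
// d = discount * pv_within_neuron_discount^3 ~ discount * 0.86, while the
// self-discount sd is divided by pv_within_neuron_discount so that a weight's
// own update is still penalized only through pv_self_discount.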
00558 
00559 void PvGradNNet::globalSyncGrad()
00560 {
00561 }
00562 
00563 void PvGradNNet::neuronSyncGrad()
00564 {
00565 }
00566 
00567 } // end of namespace PLearn
00568 
00569 
00570 /*
00571   Local Variables:
00572   mode:c++
00573   c-basic-offset:4
00574   c-file-style:"stroustrup"
00575   c-file-offsets:((innamespace . 0)(inline-open . 0))
00576   indent-tabs-mode:nil
00577   fill-column:79
00578   End:
00579 */
00580 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :