// -*- C++ -*-

// DiverseComponentAnalysis.cc
//
// Copyright (C) 2008 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Vincent

#include "DiverseComponentAnalysis.h"
#include <plearn/vmat/VMat_basic_stats.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/var/ProductVariable.h>
#include <plearn/var/ProductTransposeVariable.h>
#include <plearn/var/TransposeProductVariable.h>
#include <plearn/var/SquareVariable.h>
#include <plearn/var/AbsVariable.h>
#include <plearn/var/SquareRootVariable.h>
#include <plearn/var/ExpVariable.h>
#include <plearn/var/TimesVariable.h>
#include <plearn/var/SumVariable.h>
#include <plearn/var/SigmoidVariable.h>
#include <plearn/var/TanhVariable.h>
#include <plearn/var/NegateElementsVariable.h>
#include <plearn/var/TimesConstantVariable.h>
#include <plearn/var/SumSquareVariable.h>
#include <plearn/var/RowSumSquareVariable.h>
#include <plearn/var/EXPERIMENTAL/ConstrainedSourceVariable.h>
#include <plearn/var/EXPERIMENTAL/Cov2CorrVariable.h>
#include <plearn/var/EXPERIMENTAL/DiagVariable.h>
#include <plearn/var/EXPERIMENTAL/NonDiagVariable.h>
#include <plearn/var/TransposeVariable.h>
#include <plearn/var/ColumnSumVariable.h>
#include <plearn/var/Var_operators.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    DiverseComponentAnalysis,
    "Diverse Component Analysis",
    "This is an experimental class that finds linear\n"
    "projection directions that should yield\n"
    "'diverse' components, based on some diversity loss");

DiverseComponentAnalysis::DiverseComponentAnalysis()
    :ncomponents(2),
     nonlinearity("none"),
     cov_transformation_type("cov"),
     diag_add(0.),
     diag_premul(1.0),
     offdiag_premul(1.0),
     diag_nonlinearity("square"),
     offdiag_nonlinearity("square"),
     diag_weight(-1.0),
     offdiag_weight(1.0),
     force_zero_mean(false),
     epsilon(1e-8),
     nu(0),
     constrain_norm_type(-2),
     normalize(false)
{
    // This learner generates random numbers (to initialize W in forget()),
    // so we enable the use of the inherited PRandom object.
    random_gen = new PRandom();
}

void DiverseComponentAnalysis::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    // ### ex:
    // declareOption(ol, "myoption", &DiverseComponentAnalysis::myoption,
    //               OptionBase::buildoption,
    //               "Help text describing this option");
    // ...

    declareOption(
        ol, "nonlinearity", &DiverseComponentAnalysis::nonlinearity, OptionBase::buildoption,
        "The nonlinearity to apply after the linear transformation of the inputs to obtain the representation.");

    declareOption(
        ol, "force_zero_mean", &DiverseComponentAnalysis::force_zero_mean, OptionBase::buildoption,
        "If true, the input mean will not be computed but forced to 0\n"
        "(and a correspondingly different covariance matrix will be computed).");

    declareOption(
        ol, "epsilon", &DiverseComponentAnalysis::epsilon, OptionBase::buildoption,
        "Regularization value added to the diagonal of the computed input covariance matrix.");

    declareOption(
        ol, "nu", &DiverseComponentAnalysis::nu, OptionBase::buildoption,
        "Regularization parameter simulating destruction noise:\n"
        "off-diagonal elements of covariance matrix Cx will be multiplied by 1-nu.");

    declareOption(
        ol, "constrain_norm_type", &DiverseComponentAnalysis::constrain_norm_type, OptionBase::buildoption,
        "How to constrain the norms of rows of W:\n"
        "  -1: L1 norm constrained source\n"
        "  -2: L2 norm constrained source\n"
        "  -3: explicit L2 normalization\n"
        "  >0: add the specified value times exp(sumsquare(W)) to the cost\n");

    declareOption(
        ol, "ncomponents", &DiverseComponentAnalysis::ncomponents, OptionBase::buildoption,
        "The number of components to keep (this is also the outputsize).");

    declareOption(
        ol, "cov_transformation_type", &DiverseComponentAnalysis::cov_transformation_type, OptionBase::buildoption,
        "Controls the kind of transformation to apply to the covariance matrix:\n"
        "cov: no transformation (keep the covariance)\n"
        "corr: transform into correlations, but keep the variances on the diagonal.\n"
        "squaredist: a 'squared distance kernel' transformation Dij <- Cii+Cjj-2Cij\n"
        "sincov: instead of ||u|| ||v|| cos(angle(u,v)) we use ||u|| ||v|| |sin(angle(u,v))|,\n"
        "        computed as sqrt((1-corr(u,v)^2) * <u,u> * <v,v>) where corr(u,v) = <u,v>/sqrt(<u,u><v,v>)\n"
        "        and <u,v> is given by the covariance matrix.\n");
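
    // The "sincov" formula above is just the identity
    //   <u,u><v,v> - <u,v>^2 = ||u||^2 ||v||^2 (1 - cos^2(angle(u,v)))
    //                        = ||u||^2 ||v||^2 sin^2(angle(u,v)),
    // hence sqrt((1-corr(u,v)^2) * <u,u> * <v,v>) = ||u|| ||v|| |sin(angle(u,v))|,
    // which is what the "sincov" branch of build_() computes.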

    declareOption(
        ol, "diag_add", &DiverseComponentAnalysis::diag_add, OptionBase::buildoption,
        "This value will be added to the diagonal (before premultiplying and applying the non-linearity).");

    declareOption(
        ol, "diag_premul", &DiverseComponentAnalysis::diag_premul, OptionBase::buildoption,
        "Diagonal elements of Cy will be pre-multiplied by diag_premul (before applying the non-linearity).");

    declareOption(
        ol, "offdiag_premul", &DiverseComponentAnalysis::offdiag_premul, OptionBase::buildoption,
        "Non-diagonal elements of Cy will be pre-multiplied by offdiag_premul (before applying the non-linearity).");

    declareOption(
        ol, "diag_nonlinearity", &DiverseComponentAnalysis::diag_nonlinearity, OptionBase::buildoption,
        "The kind of nonlinearity to apply to the diagonal elements of Cy\n"
        "after it has been through cov_transformation_type.\n"
        "Currently supported: none square abs sqrt sqrtabs exp tanh sigmoid");

    declareOption(
        ol, "offdiag_nonlinearity", &DiverseComponentAnalysis::offdiag_nonlinearity, OptionBase::buildoption,
        "The kind of nonlinearity to apply to the non-diagonal elements of Cy\n"
        "after it has been through cov_transformation_type.\n"
        "Currently supported: none square abs sqrt sqrtabs exp tanh sigmoid");

    declareOption(
        ol, "diag_weight", &DiverseComponentAnalysis::diag_weight, OptionBase::buildoption,
        "The weight given to the sum of transformed diagonal elements in the cost.");

    declareOption(
        ol, "offdiag_weight", &DiverseComponentAnalysis::offdiag_weight, OptionBase::buildoption,
        "The weight given to the sum of transformed non-diagonal elements in the cost.");

    declareOption(
        ol, "optimizer", &DiverseComponentAnalysis::optimizer, OptionBase::buildoption,
        "The gradient-based optimizer to use.");

    declareOption(
        ol, "normalize", &DiverseComponentAnalysis::normalize, OptionBase::buildoption,
        "If true, computed outputs will be scaled so that they have unit variance\n"
        "(see the explanation of inv_stddev_of_projections).");

    // learnt options
    declareOption(
        ol, "mu", &DiverseComponentAnalysis::mu, OptionBase::learntoption,
        "The (weighted) mean of the samples.");

    declareOption(
        ol, "Cx", &DiverseComponentAnalysis::Cx, OptionBase::learntoption,
        "The (weighted) covariance of the samples.");

    declareOption(
        ol, "W", &DiverseComponentAnalysis::W, OptionBase::learntoption,
        "A ncomponents x inputsize matrix containing the learnt projection directions.");

    declareOption(
        ol, "bias", &DiverseComponentAnalysis::bias, OptionBase::learntoption,
        "A 1 x ncomponents matrix containing the learnt bias (for the nonlinear case only).");

    declareOption(
        ol, "inv_stddev_of_projections", &DiverseComponentAnalysis::inv_stddev_of_projections, OptionBase::learntoption,
        "As its name implies, this is one over the standard deviation of the projected data.\n"
        "When normalize=true, computeOutput multiplies the projection by this,\n"
        "elementwise, so that the output has unit variance.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void DiverseComponentAnalysis::declareMethods(RemoteMethodMap& rmm)
{
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(rmm,
                  "getVarValue",
                  &DiverseComponentAnalysis::getVarValue,
                  (BodyDoc("Returns the matValue of the variable with the given name"),
                   ArgDoc("varname", "name of the variable searched for"),
                   RetDoc("Returns the value of the var as a Mat")));

    declareMethod(rmm,
                  "getVarGradient",
                  &DiverseComponentAnalysis::getVarGradient,
                  (BodyDoc("Returns the matGradient of the variable with the given name"),
                   ArgDoc("varname", "name of the variable searched for"),
                   RetDoc("Returns the gradient of the var as a Mat")));

    declareMethod(rmm,
                  "listVarNames",
                  &DiverseComponentAnalysis::listVarNames,
                  (BodyDoc("Returns a list of the names of all vars"),
                   RetDoc("Returns a list of the names of all vars")));
}

Var DiverseComponentAnalysis::nonlinear_transform(Var in, string nonlinearity)
{
    Var res; // result
    if(nonlinearity=="none" || nonlinearity=="linear")
        res = in;
    else if(nonlinearity=="square")
        res = square(in);
    else if(nonlinearity=="abs")
        res = abs(in);
    else if(nonlinearity=="sqrt")
        res = squareroot(in);
    else if(nonlinearity=="sqrtabs")
        res = squareroot(abs(in));
    else if(nonlinearity=="exp")
        res = exp(in);
    else if(nonlinearity=="tanh")
        res = tanh(in);
    else if(nonlinearity=="sigmoid")
        res = sigmoid(in);
    else
        PLERROR("Unknown nonlinearity %s",nonlinearity.c_str());
    return res;
}
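
// A minimal usage sketch (the shape and the "sqrtabs" choice are illustrative
// assumptions, not a prescription), relying on the Var operators included above:
//
//     Var C(3,3);                                  // some 3x3 matrix Var
//     Var Ct = nonlinear_transform(C, "sqrtabs");  // elementwise sqrt(|.|)
//
// Any name outside {none, linear, square, abs, sqrt, sqrtabs, exp, tanh,
// sigmoid} triggers a PLERROR at run time.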

void DiverseComponentAnalysis::build_()
{
    perr << "Entering DiverseComponentAnalysis::build_()" << endl;
    bool rebuild_all = inputsize_>0 && (W.isNull() || (W->matValue.width()!=inputsize_));
    bool rebuild_some = inputsize_>0 && Cyt.isNull();
    bool linear = (nonlinearity=="none" || nonlinearity=="linear");
    if(rebuild_some || rebuild_all)
    {
        perr << "Building with inputsize_ = " << inputsize_ << endl;

        Var nW;

        if(constrain_norm_type==-1) // use constrained source to constrain L1 norms to 1
        {
            perr << "using constrained source to constrain L1 norms to 1" << endl;
            if(rebuild_all)
                W = new ConstrainedSourceVariable(ncomponents,inputsize_,1);
            nW = W;
        }
        else if(constrain_norm_type==-2) // use constrained source to constrain L2 norms to 1
        {
            perr << "using constrained source to constrain L2 norms to 1" << endl;
            if(rebuild_all)
                W = new ConstrainedSourceVariable(ncomponents,inputsize_,2);
            nW = W;
        }
        else if(constrain_norm_type==-3) // compute L2 normalization explicitly
        {
            perr << "Normalizing explicitly" << endl;
            if(rebuild_all)
                W = Var(ncomponents,inputsize_);
            nW = W/squareroot(rowSumSquare(W));
        }
        else  // use ordinary weight decay: nW is not hard-constrained to be normalized
        {
            perr << "Using ordinary weight decay " << constrain_norm_type << endl;
            if(rebuild_all)
                W = Var(ncomponents,inputsize_);
            nW = W;
        }

        if(linear)
        {
            if(rebuild_all)
                Cx = Var(inputsize_,inputsize_);
            Cx->setName("Cx");
            Cy = product(nW, productTranspose(Cx, nW));
        }
        else // nonlinear transform
        {
            int l = train_set->length();
            perr << "Building with nonlinear transform and l="<<l <<" examples of inputsize=" << inputsize_ << endl;

            inputdata = Var(l,inputsize_);
            if(rebuild_all)
                bias = Var(1, ncomponents);
            trdata = productTranspose(inputdata,nW)+bias;
            perr << "USING MAIN REPRESENTATION NONLINEARITY: " << nonlinearity << endl;
            trdata = nonlinear_transform(trdata,nonlinearity);
            if(force_zero_mean)
                ctrdata = trdata;
            else // center the transformed data
                ctrdata = trdata-(1.0/l)*columnSum(trdata);
            ctrdata->setName("ctrdata");
            trdata->setName("trdata");
            Cy = (1.0/l)*transposeProduct(ctrdata,ctrdata);
        }
        perr << "Built Cy of size " << Cy->length() << "x" << Cy->width() << endl;

        if(cov_transformation_type=="cov")
            Cyt = Cy;
        else if(cov_transformation_type=="corr")
            Cyt = cov2corr(Cy,2);
        else if(cov_transformation_type=="squaredist")
        {
            Var dCy = diag(Cy);
            Cyt = Cy*(-2.0)+dCy+transpose(dCy);
        }
        else if(cov_transformation_type=="sincov")
        {
            // sqrt((1-corr^2) * <u,u> * <v,v>); the 1e-6 jitter keeps the
            // sqrt argument strictly positive
            Var dCy = diag(Cy);
            Cyt = squareroot(((1+1e-6)-square(cov2corr(Cy)))*dCy*transpose(dCy));
        }
        else
            PLERROR("Invalid cov_transformation_type");

        if(diag_weight!=0)
        {
            Var diagelems = diag(Cyt);
            if(diag_add!=0)
                diagelems = diagelems+diag_add;
            L += diag_weight*sum(nonlinear_transform(diagelems*diag_premul,diag_nonlinearity));
        }
        if(offdiag_weight!=0)
            L += offdiag_weight*sum(nonlinear_transform(nondiag(Cyt)*offdiag_premul,offdiag_nonlinearity));

        if(constrain_norm_type>0) // soft norm penalty added to the cost
            L += constrain_norm_type*exp(sumsquare(W));

        if(!optimizer)
            PLERROR("You must specify the optimizer field (ex: GradientOptimizer)");
        if(linear)
            optimizer->setToOptimize(W, L);
        else
            optimizer->setToOptimize(W&bias, L);

        perr << "Built optimizer" << endl;
        nW->setName("W");
        Cy->setName("Cy");
        Cyt->setName("Cyt");
        L->setName("L");

        allvars = Cx & trdata & ctrdata & nW & Cy & Cyt & L;
    }
    perr << "Exiting DiverseComponentAnalysis::build_()" << endl;
}
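
// Sketch of the graph built above, in matrix notation (linear case):
//
//     Cy  = nW Cx nW'                    (ncomponents x ncomponents)
//     Cyt = transform(Cy)                (per cov_transformation_type)
//     L   = diag_weight    * sum(f_d(diag_premul    * (diag(Cyt) + diag_add)))
//         + offdiag_weight * sum(f_o(offdiag_premul * nondiag(Cyt)))
//
// where f_d and f_o are the diag_/offdiag_nonlinearity functions. With the
// default diag_weight=-1 and offdiag_weight=1, minimizing L encourages large
// (transformed) variances and small (transformed) cross-covariances, i.e.
// 'diverse' projection directions.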

TVec<string> DiverseComponentAnalysis::listVarNames() const
{
    int n = allvars.length();
    TVec<string> names;
    for(int i=0; i<n; i++)
        if(allvars[i].isNotNull())
            names.append(allvars[i]->getName());
    return names;
}

Mat DiverseComponentAnalysis::getVarValue(string varname) const
{
    for(int i=0; i<allvars.length(); i++)
    {
        Var v = allvars[i];
        if(v.isNotNull() && v->getName()==varname)
            return v->matValue;
    }
    PLERROR("No Var with name %s", varname.c_str());
    return Mat();
}

Mat DiverseComponentAnalysis::getVarGradient(string varname) const
{
    for(int i=0; i<allvars.length(); i++)
    {
        Var v = allvars[i];
        if(v.isNotNull() && v->getName()==varname)
            return v->matGradient;
    }
    PLERROR("No Var with name %s", varname.c_str());
    return Mat();
}

void DiverseComponentAnalysis::build()
{
    inherited::build();
    build_();
}

void DiverseComponentAnalysis::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(mu, copies);
    deepCopyField(Cx, copies);
    deepCopyField(W, copies);
}

int DiverseComponentAnalysis::outputsize() const
{
    return ncomponents;
}

void DiverseComponentAnalysis::forget()
{
    // this will reset stage=0 and reset the random_gen to the initial seed_
    inherited::forget();

    perr << "Called DCS::forget() with inputsize_ = " << inputsize_ << endl;
    if(inputsize_>0)
    {
        // initialize W with random normal values, then normalize its rows
        // (L1 norms for constrain_norm_type==-1, L2 norms otherwise)
        random_gen->fill_random_normal(W->value, 0., 1.);
        perr << "Squared norm of first row of W after fill_random_normal: " << pownorm(W->matValue(0)) << endl;
        int normval = (constrain_norm_type==-1 ? 1 : 2);
        for(int i=0; i<ncomponents; i++)
            PLearn::normalize(W->matValue(i), normval);
        perr << "Squared norm of first row of W after L" << normval << " normalization: " << pownorm(W->matValue(0)) << endl;
    }
}

void DiverseComponentAnalysis::train()
{
    // The role of the train method is to bring the learner up to
    // stage==nstages, updating train_stats with training costs measured
    // on-line in the process.

    /* TYPICAL CODE:

    static Vec input;  // static so we don't reallocate memory each time...
    static Vec target; // (but be careful that static means shared!)
    input.resize(inputsize());    // the train_set's inputsize()
    target.resize(targetsize());  // the train_set's targetsize()
    real weight;

    // This generic PLearner method does a number of standard stuff useful for
    // (almost) any learner, and returns 'false' if no training should take
    // place. See PLearner.h for more details.
    */

    if (!initTrain())
        return;

    while(stage<nstages)
    {
        // clear statistics of previous epoch
        train_stats->forget();

        if(stage==0) // stage 0: gather input statistics (mean and/or covariance)
        {
            bool linear = (nonlinearity=="none" || nonlinearity=="linear");
            if(!linear)
            {
                perr << "Nonlinear training to stage 1" << endl;
                // load the whole training set into the inputdata Var
                Mat X = inputdata->matValue;
                int l = train_set->length();
                Vec target;
                real weight;
                for(int i=0; i<l; i++)
                {
                    Vec Xi = X(i);
                    train_set->getExample(i,Xi,target,weight);
                }
                mu.resize(inputsize_);
                columnMean(X, mu);
                perr << "Nonlinear training to stage 1. DONE." << endl;
            }
            else // linear case
            {
                if(force_zero_mean)
                {
                    mu.resize(inputsize());
                    mu.fill(0);
                    computeInputCovar(train_set, mu, Cx->matValue, epsilon);
                }
                else
                    computeInputMeanAndCovar(train_set, mu, Cx->matValue, epsilon);

                if(nu!=0)
                {
                    // simulated destruction noise: multiply off-diagonal terms by 1-nu
                    Mat C = Cx->matValue;
                    int l = C.length();
                    for(int i=0; i<l; i++)
                        for(int j=0; j<l; j++)
                            if(i!=j)
                                C(i,j) *= (1-nu);
                }
            }
        }
        else
        {
            optimizer->optimizeN(*train_stats);
            Mat C = Cy->matValue;
            int l = C.length();
            inv_stddev_of_projections.resize(l);
            for(int i=0; i<l; i++)
                inv_stddev_of_projections[i] = 1.0/sqrt(C(i,i));
        }

        ++stage;
        train_stats->finalize(); // finalize statistics for this epoch
    }
}
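
// A minimal PLearn script sketch for using this learner (the option values
// and the GradientOptimizer settings are illustrative assumptions only):
//
//     DiverseComponentAnalysis(
//         ncomponents = 2;
//         cov_transformation_type = "corr";
//         nstages = 51;  # stage 0 gathers statistics, later stages optimize W
//         optimizer = GradientOptimizer(start_learning_rate = 0.01);
//     )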

void DiverseComponentAnalysis::computeOutput(const Vec& input, Vec& output) const
{
    static Vec x;
    x.resize(input.length());
    x << input;

    // Center and project on directions
    x -= mu;
    output.resize(ncomponents);
    product(output, W->matValue, x);
    if(normalize)
        output *= inv_stddev_of_projections;
}

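// In matrix notation, the output computed above is W*(input - mu), optionally
// rescaled elementwise by inv_stddev_of_projections so that each component
// has (approximately) unit variance under the training distribution.
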
void DiverseComponentAnalysis::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{
    costs.resize(0);
}

TVec<string> DiverseComponentAnalysis::getTestCostNames() const
{
    return TVec<string>();
}

TVec<string> DiverseComponentAnalysis::getTrainCostNames() const
{
    return TVec<string>(1,"L");
}

} // end of namespace PLearn

/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :