// -*- C++ -*-

// EntropyContrastLearner.cc
//
// Copyright (C) 2004 Marius Muja
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: EntropyContrastLearner.cc 6861 2007-04-09 19:04:15Z saintmlx $
 ******************************************************* */

// Authors: Marius Muja
#include "EntropyContrastLearner.h"
#include "plearn/var/NoBpropVariable.h"
#include "plearn/var/DiagonalizedFactorsProductVariable.h"
#include "plearn/display/DisplayUtils.h"
#include <plearn/var/PDistributionVariable.h>
#include <plearn/var/SVDVariable.h>
#include <plearn/var/ExtractVariable.h>
#include <plearn/var/OutputVariable.h>
#include <plearn_learners/distributions/GaussianDistribution.h>
#include <plearn/math/random.h>


#define INDEX(i,j) (((i)*((i)+1))/2+(j))
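// INDEX(i,j) maps a lower-triangular pair (i,j), with j <= i, to its offset
// in a packed triangular array: row i starts at offset i*(i+1)/2.
// For example, INDEX(2,1) == 2*3/2 + 1 == 4.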

namespace PLearn {
using namespace std;

void displayVarGr(const Var& v, bool display_values)
{
    displayVarGraph(v,display_values,200);
}

void displayVarFn(const Func& f,bool display_values)
{
    displayFunction(f,display_values,200);
}

EntropyContrastLearner::EntropyContrastLearner()
    : distribution("normal"),
      weight_real(1),
      weight_generated(1),
      weight_extra(1),
      weight_decay_hidden(0),
      weight_decay_output(0),
      normalize_constraints(true),
      save_best_params(true),
      sigma_generated(0.1),
      sigma_min_threshold(0.1),
      eps(0.0001),
      save_x_hat(false),
      gen_method("N(0,I)"),
      use_sigma_min_threshold(true)
{
    // ### You may or may not want to call build_() to finish building the object
    // build_();
}
PLEARN_IMPLEMENT_OBJECT(
    EntropyContrastLearner,
    "Learns constraint functions by contrasting the entropy of real and generated data.",
    "EntropyContrastLearner learns 'nconstraints' feature functions, each a\n"
    "one-hidden-layer network. The training cost drives each feature toward\n"
    "low variance on the real data and high variance on generated contrast\n"
    "data, while an extra cost keeps the gradients of the different features\n"
    "orthogonal to one another.");

void EntropyContrastLearner::declareOptions(OptionList& ol)
{
    declareOption(ol, "nconstraints", &EntropyContrastLearner::nconstraints, OptionBase::buildoption,
                  "The number of constraints to create (that's also the outputsize).");
    declareOption(ol, "nhidden", &EntropyContrastLearner::nhidden, OptionBase::buildoption,
                  "The number of hidden units.");
    declareOption(ol, "optimizer", &EntropyContrastLearner::optimizer, OptionBase::buildoption,
                  "The optimizer to use.\n");
    declareOption(ol, "distribution", &EntropyContrastLearner::distribution, OptionBase::buildoption,
                  "The distribution to use.\n");
    declareOption(ol, "weight_real", &EntropyContrastLearner::weight_real, OptionBase::buildoption,
                  "The relative weight of the cost on the real data; the default is 1.\n");
    declareOption(ol, "weight_generated", &EntropyContrastLearner::weight_generated, OptionBase::buildoption,
                  "The relative weight of the cost on the generated data; the default is 1.\n");
    declareOption(ol, "weight_extra", &EntropyContrastLearner::weight_extra, OptionBase::buildoption,
                  "The relative weight of the extra (orthogonality) cost; the default is 1.\n");
    declareOption(ol, "weight_decay_hidden", &EntropyContrastLearner::weight_decay_hidden, OptionBase::buildoption,
                  "Decay factor for the hidden units.\n");
    declareOption(ol, "weight_decay_output", &EntropyContrastLearner::weight_decay_output, OptionBase::buildoption,
                  "Decay factor for the output units.\n");
    declareOption(ol, "normalize_constraints", &EntropyContrastLearner::normalize_constraints, OptionBase::buildoption,
                  "Whether to normalize the output constraints.\n");
    declareOption(ol, "save_best_params", &EntropyContrastLearner::save_best_params, OptionBase::buildoption,
                  "Whether the best parameters seen so far are saved at each stage.\n");
    declareOption(ol, "sigma_generated", &EntropyContrastLearner::sigma_generated, OptionBase::buildoption,
                  "The sigma of the Gaussian from which the generated data are drawn.\n");
    declareOption(ol, "sigma_min_threshold", &EntropyContrastLearner::sigma_min_threshold, OptionBase::buildoption,
                  "The minimum value for each element of sigma of the computed features.\n");
    declareOption(ol, "eps", &EntropyContrastLearner::eps, OptionBase::buildoption,
                  "Singular values smaller than this are ignored.\n");
    declareOption(ol, "gradient_scaling", &EntropyContrastLearner::gradient_scaling, OptionBase::buildoption,
                  "Per-constraint scaling applied to the feature gradients when generating\n"
                  "data with the 'local_gaussian' method.");
    declareOption(ol, "save_x_hat", &EntropyContrastLearner::save_x_hat, OptionBase::buildoption,
                  "Save the generated data to a file (for debugging purposes).");
    declareOption(ol, "gen_method", &EntropyContrastLearner::gen_method, OptionBase::buildoption,
                  "The method used to generate new points.");
    declareOption(ol, "use_sigma_min_threshold", &EntropyContrastLearner::use_sigma_min_threshold, OptionBase::buildoption,
                  "Whether the sigma of the features should be lower-bounded.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void EntropyContrastLearner::build_()
{
    manual_seed(time(NULL));

    if (train_set) {

        // input data
        int n = inputsize();
        x = Var(n, "input");

        V_save.resize(nconstraints*nhidden*inputsize());
        V_b_save.resize(nconstraints*nhidden);

        V.resize(nconstraints);
        V_b.resize(nconstraints);
        for(int k=0 ; k<nconstraints ; ++k) {
            V[k] = Var(nhidden,inputsize(),("V_"+tostring(k)).c_str());
            V_b[k] = Var(nhidden,1,("V_b_"+tostring(k)).c_str());
            params.push_back(V[k]);
            params.push_back(V_b[k]);
        }

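        // The output weights form a packed lower triangle: W[INDEX(i,j)],
        // j <= i, connects constraint i's output to the hidden layer of
        // constraint j, so each constraint sees all the previous ones.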
        int W_size = (nconstraints*(nconstraints+1))/2;
        W.resize(W_size);
        W_b.resize(nconstraints);

        W_save.resize(W_size*nhidden);
        W_b_save.resize(nconstraints);

        for(int i=0 ; i<nconstraints ; ++i) {
            for(int j=0 ; j<=i ; ++j) {
                W[INDEX(i,j)] = Var(1,nhidden,("W_"+tostring(i)+tostring(j)).c_str());
                params.push_back(W[INDEX(i,j)]);
            }
            W_b[i] = Var(1,1,("W_b_"+tostring(i)).c_str());
            params.push_back(W_b[i]);
        }

        // hidden layer
        VarArray hf(nconstraints);

        for(int k=0 ; k<nconstraints ; ++k) {
            hf[k] = tanh(product(V[k],x)+V_b[k]);
        }

        // network output: f[i] uses the hidden layers of all constraints
        // j <= i, but no_bprop blocks the gradient into the earlier ones,
        // so each constraint only trains its own hidden layer.
        VarArray f(nconstraints);

        for(int i=0 ; i<nconstraints ; ++i) {
            for(int j=i ; j>=0 ; --j) {
                if (j==i) {
                    f[i] = product(W[INDEX(i,j)],hf[j]) + W_b[i];
                } else {
                    f[i] = f[i] + product(W[INDEX(i,j)],no_bprop(hf[j]));
                }
            }
        }

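        // hg[k] is the Jacobian of the hidden layer with respect to the
        // input, (1 - tanh^2(V[k] x + V_b[k])) * V[k]; combined with the
        // output weights below, g[i] is the gradient of feature f_i w.r.t. x.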
        VarArray hg(nconstraints);

        for(int k=0 ; k<nconstraints ; ++k) {
            hg[k] = (1-square(tanh(product(V[k],x)+V_b[k])))*V[k];
        }

        g.resize(nconstraints);

        for(int i=0 ; i<nconstraints ; ++i) {
            for(int j=i ; j>=0 ; --j) {
                if (j==i) {
                    g[i] = product(W[INDEX(i,j)],hg[j]);
                } else {
                    g[i] = g[i] + product(W[INDEX(i,j)],no_bprop(hg[j]));
                }
            }
        }

        // generated (contrast) data: the base sample is drawn from an
        // isotropic Gaussian centered at the origin
        PP<GaussianDistribution> dist = new GaussianDistribution();
        Vec eig_values(n);
        Mat eig_vectors(n,n); eig_vectors.clear();
        for(int i=0; i<n; i++)
        {
            eig_values[i] = 0.1;
            eig_vectors(i,i) = 1.0;
        }
        dist->mu = Vec(n);
        dist->eigenvalues = eig_values;
        dist->eigenvectors = eig_vectors;

        PP<PDistribution> temp;
        temp = dist;

        x_hat = new PDistributionVariable(x,temp);
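        // With the default gen_method ("N(0,I)"), x_hat is presumably used
        // as-is: a fresh draw from the Gaussian built above (variance 0.1
        // along every axis).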

        if (gen_method=="local_gaussian") {

            Var grad = transpose(vconcat(g));
            Var gs = Var(1,nconstraints);
            gs->value << gradient_scaling;

            grad = grad*invertElements(gs);

            Var svd_vec = svd(grad);

            int M = inputsize();
            int N = nconstraints;

            Var U = extract(svd_vec,0,M,M);
            Var D = extract(svd_vec,M*M+N*N,M,1);

            Var sigma_1 = Var(M,1,"sigma_1");
            sigma_1->matValue.fill(sigma_generated);

            Var eps_var = Var(M,1,"epsilon");
            eps_var->matValue.fill(1/eps);

            Var zero = Var(M);
            zero->matValue.fill(0);
            Var sigma1;
            sigma1 = 5*square(invertElements(min(ifThenElse(D>zero,D,eps_var))));

            Var one = Var(M);
            one->matValue.fill(1);

            D = ifThenElse(D>zero,invertElements(square(D)+1e-10),sigma1*one);

            x_hat = no_bprop(product(U,(sqrt(D)*x_hat))+x);
        }
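        // For "local_gaussian", the noise is shaped by an SVD of the scaled
        // feature gradients: along a singular direction with singular value
        // d the noise is scaled by roughly 1/d (small where the features
        // vary fast), while directions with zero singular value get the
        // larger fallback scale sigma1; the sample is then recentered on
        // the real point x.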

        if (save_x_hat) {
            x_hat = output_var(x_hat,"x_hat.dat");
        }

        VarArray hf_hat(nconstraints);

        for(int k=0 ; k<nconstraints ; ++k) {
            hf_hat[k] = tanh(product(V[k],x_hat)+V_b[k]);
        }

        VarArray f_hat(nconstraints);

        for(int i=0 ; i<nconstraints ; ++i) {
            for(int j=i ; j>=0 ; --j) {
                if (j==i) {
                    f_hat[i] = product(W[INDEX(i,j)],hf_hat[j]) + W_b[i];
                } else {
                    f_hat[i] = f_hat[i] + product(W[INDEX(i,j)],no_bprop(hf_hat[j]));
                }
            }
        }

        // extra cost - to keep the constraint gradients orthogonal
        Var extra_cost;
        for(int i=0 ; i<nconstraints ; ++i) {
            for(int j=i+1 ; j<nconstraints ; ++j) {
                Var tmp = no_bprop(g[i]);
                if (extra_cost.isNull()) {
                    extra_cost = square(dot(tmp,g[j])/product(norm(tmp),norm(g[j])));
                } else {
                    extra_cost = extra_cost + square(dot(tmp,g[j])/product(norm(tmp),norm(g[j])));
                }
            }
        }
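        // Each term is the squared cosine between g[i] and g[j]; no_bprop on
        // g[i] means only the later constraint j adapts to stay orthogonal
        // to the earlier ones.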

        Var f_var = hconcat(f);
        Var f_hat_var = hconcat(f_hat);

        Var c_entropy;

        if (distribution=="normal") {

            mu = Var(1,nconstraints,"mu");
            params.push_back(mu);
            sigma = Var(1,nconstraints,"sigma");
            params.push_back(sigma);

            mu_hat = Var(1,nconstraints,"mu_hat");
            params.push_back(mu_hat);
            sigma_hat = Var(1,nconstraints,"sigma_hat");
            params.push_back(sigma_hat);

            Var c_mu = square(no_bprop(f_var)-mu);
            c_mu->setName("mu cost");
            Var c_sigma = square(sigma-square(no_bprop(c_mu)));
            c_sigma->setName("sigma cost");

            if (use_sigma_min_threshold) {
                Var sigma_min = Var(1,nconstraints);
                sigma_min->matValue.fill(sigma_min_threshold);
                sigma = max(sigma,no_bprop(sigma_min));
            }

            Var c_mu_hat = square(no_bprop(f_hat_var)-mu_hat);
            c_mu_hat->setName("generated mu cost");
            Var c_sigma_hat = square(sigma_hat-square(no_bprop(c_mu_hat)));
            c_sigma_hat->setName("generated sigma cost");

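            // Entropy contrast: minimizing this concentrates the real
            // features around mu (low variance) while spreading the
            // generated features away from mu_hat (high variance); mu and
            // sigma enter only as no_bprop snapshots, trained by their own
            // c_mu / c_sigma costs above.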
            c_entropy = weight_real*square(f_var-no_bprop(mu))/no_bprop(sigma) -
                weight_generated*square(f_hat_var-no_bprop(mu_hat))/no_bprop(sigma_hat);
            c_entropy->setName("entropy cost");

            costs = c_entropy & c_mu & c_sigma & c_mu_hat & c_sigma_hat;

            if (nconstraints>1) {
                costs &= weight_extra*extra_cost;
            }
            if (weight_decay_hidden>0) {
                costs &= weight_decay_hidden*sumsquare(hconcat(V));
            }
            if (weight_decay_output>0) {
                costs &= weight_decay_output*sumsquare(hconcat(W));
            }
        }
        else if (distribution=="student") {
            c_entropy = weight_real*log(real(1)+square(f_var)) - weight_generated*log(real(1)+square(f_hat_var));

            costs.push_back(c_entropy);

            if (nconstraints>1) {
                costs &= weight_extra*extra_cost;
            }
            if (weight_decay_hidden>0) {
                costs &= weight_decay_hidden*sumsquare(hconcat(V));
            }
            if (weight_decay_output>0) {
                costs &= weight_decay_output*sumsquare(hconcat(W));
            }
        }

        training_cost = sum(hconcat(costs));
        training_cost->setName("cost");

        f_output = Func(x, hconcat(g));
    }
}

// ### Nothing to add here, simply calls build_
void EntropyContrastLearner::build()
{
    inherited::build();
    build_();
}


void EntropyContrastLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### Remove this line when you have fully implemented this method.
    PLERROR("EntropyContrastLearner::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}


int EntropyContrastLearner::outputsize() const
{
    return nconstraints;
}

void EntropyContrastLearner::forget()
{
    if (train_set) initializeParams();
    stage = 0;
}

void EntropyContrastLearner::train()
{
    if(!train_stats)  // make a default stats collector, in case there's none
        train_stats = new VecStatsCollector();

    int l = train_set->length();
    int nsamples = 1;
    Func paramf = Func(x, training_cost); // parameterized function to optimize
    //displayFunction(paramf);

    Var totalcost = meanOf(train_set, paramf, nsamples);
    if(optimizer)
    {
        optimizer->setToOptimize(params, totalcost);
        optimizer->build();
        optimizer->reset();
    }
    else PLERROR("EntropyContrastLearner::train can't train without setting an optimizer first!");
    PP<ProgressBar> pb;
    if(report_progress>0) {
        pb = new ProgressBar("Training EntropyContrastLearner from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
    }

    real min_cost = 1e10;

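    // One stage is an epoch: l/nsamples optimizer updates, i.e. roughly one
    // pass over the training set.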
    int optstage_per_lstage = l/nsamples;
    while(stage<nstages)
    {
        optimizer->nstages = optstage_per_lstage;

        // clear statistics of previous epoch
        train_stats->forget();

        optimizer->optimizeN(*train_stats);

        train_stats->finalize(); // finalize statistics for this epoch

        if (save_best_params) {
            if (fabs(training_cost->valuedata[0])<min_cost) {
                min_cost = fabs(training_cost->valuedata[0]);
                V.copyTo(V_save);
                //V_b.copyTo(V_b_save);
                W.copyTo(W_save);
                //W_b.copyTo(W_b_save);
            }
        }

        if (verbosity>0) {
            cout << "Stage: " << stage << ", training cost: " << training_cost->matValue;

            //for(int i=0 ; i<W.length() ; ++i) {
            //    cout << W[i] << "\n";
            //}
            //cout << "---------------------------------------\n";
            cout << sigma << "\n";
            for(int i=0 ; i<costs.length() ; ++i) {
                cout << costs[i] << "\n";
            }
            cout << "---------------------------------------\n";
        }
        ++stage;
        if(pb) {
            pb->update(stage);
        }
    }

    if (save_best_params) {
        V.copyFrom(V_save);
        //V_b.copyFrom(V_b_save);
        W.copyFrom(W_save);
        //W_b.copyFrom(W_b_save);
    }

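    // Debug dump: write the first 200 training points together with their
    // normalized (and arbitrarily rescaled) constraint gradients to gen.dat.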
    Vec x_(inputsize());
    Vec g_(inputsize()*nconstraints);

    ofstream file1("gen.dat");
    for(int t=0 ; t<200 ; ++t) {
        train_set->getRow(t,x_);

        f_output->fprop(x_,g_);

        file1 << x_ << " ";

        for(int k=0 ; k<nconstraints ; ++k) {
            int is = inputsize();
            Vec tmp(is);

            tmp = g_.subVec(k*is,is);
            normalize(tmp,2);
            tmp /= 15;

            file1 << tmp << " ";
        }
        file1 << "\n";
    }
    file1.close();
}

void EntropyContrastLearner::computeOutput(const Vec& input, Vec& output) const
{
    // The output is the concatenation of the nconstraints constraint
    // gradients, inputsize() values each (note that this is larger than
    // what outputsize() reports).
    int nout = inputsize()*nconstraints;
    output.resize(nout);

    f_output->fprop(input,output);

    if (normalize_constraints) {
        int is = inputsize();
        for(int k=0 ; k<nconstraints ; ++k) {
            Vec tmp(is);

            tmp = output.subVec(k*is,is);
            normalize(tmp,2);
            tmp /= 15;   // arbitrary scaling factor
        }
    }
}

void EntropyContrastLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                     const Vec& target, Vec& costs) const
{
    // Compute the costs from *already* computed output.
    // ...
}

TVec<string> EntropyContrastLearner::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by getTrainCostNames).
    TVec<string> ret;
    return ret;
}

TVec<string> EntropyContrastLearner::getTrainCostNames() const
{
    // Return the names of the objective costs that the train method computes and
    // for which it updates the VecStatsCollector train_stats
    // (these may or may not be exactly the same as what's returned by getTestCostNames).
    TVec<string> ret;
    return ret;
}

void EntropyContrastLearner::initializeParams()
{
    real delta = 1; //1.0 / sqrt(real(inputsize()));
    for(int k=0 ; k<nconstraints ; ++k) {
        fill_random_uniform(V[k]->matValue, -delta, delta);
        fill_random_uniform(V_b[k]->matValue, -delta, delta);
        fill_random_uniform(W_b[k]->matValue, -delta, delta);
//      V_b[k]->matValue.fill(0);
//      W_b[k]->matValue.fill(0);
    }
    delta = 1; //1.0 / real(nhidden);
    for(int k=0 ; k<((nconstraints*(nconstraints+1))/2) ; ++k) {
        fill_random_uniform(W[k]->matValue, -delta, delta);
    }

    if (distribution=="normal") {
        mu->matValue.fill(0);
        sigma->matValue.fill(1);
        mu_hat->matValue.fill(0);
        sigma_hat->matValue.fill(1);
    }
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :