PLearn 0.1
VariableSelectionWithDirectedGradientDescent.cc
// -*- C++ -*-

// VariableSelectionWithDirectedGradientDescent.cc
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* **************************************************************************************************************
 * $Id: VariableSelectionWithDirectedGradientDescent.cc, v 1.0 2005/01/15 10:00:00 Bengio/Kegl/Godbout        *
 * This file is part of the PLearn library.                                                                   *
 ************************************************************************************************************** */

#include "VariableSelectionWithDirectedGradientDescent.h"
#include <plearn/base/tostring.h>

namespace PLearn {
using namespace std;

VariableSelectionWithDirectedGradientDescent::VariableSelectionWithDirectedGradientDescent()
    : learning_rate(1e-2)
{
}

PLEARN_IMPLEMENT_OBJECT(VariableSelectionWithDirectedGradientDescent,
                        "Variable selection algorithm", 
                        "Variable selection algorithm using a linear density estimator and\n"
                        "directed gradient descent to identify the most relevant variables.\n"
                        "\n"
                        "There are 4 options to set:\n"
                        "   learning_rate, the gradient step to be used by the descent algorithm,\n"
                        "   nstages, the number of epochs to be performed by the algorithm,\n"
                        "   verbosity, the level of information you want to get while training is in progress,\n"
                        "   report_progress, whether a progress bar should inform you of the progress.\n"
                        "\n"
                        "Setting both verbosity > 1 and report_progress to non-zero works, but the output is cluttered.\n"
                        "\n"
                        "The selected variables are returned in the selected_variables vector in\n"
                        "the order of their selection. The vector is a learnt option of the algorithm.\n"
                        "\n"
                        "The target should be binary, with values 0 and 1. It can be multi-dimensional,\n"
                        "in which case a different predictor is learned for each target, with all\n"
                        "predictors sharing the same set of variables. Note that the cost is currently\n"
                        "only computed for the first target.\n"
    );

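/*
  A minimal usage sketch (illustrative only; the option values and the
  "train.vmat" path are made up, but the calls are the standard PLearner
  interface):

      PP<VariableSelectionWithDirectedGradientDescent> learner =
          new VariableSelectionWithDirectedGradientDescent();
      learner->setOption("learning_rate", "0.01");
      learner->setOption("nstages", "100");
      learner->build();
      learner->setTrainingSet(getDataSet("train.vmat"));
      learner->setTrainStatsCollector(new VecStatsCollector());
      learner->train();
      // learner->selected_variables now holds the chosen input columns,
      // in the order in which they were selected.
*/
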
void VariableSelectionWithDirectedGradientDescent::declareOptions(OptionList& ol)
{ 
    declareOption(ol, "learning_rate", &VariableSelectionWithDirectedGradientDescent::learning_rate, OptionBase::buildoption,
                  "The learning rate of the gradient descent algorithm.\n");
    declareOption(ol, "input_weights", &VariableSelectionWithDirectedGradientDescent::input_weights, OptionBase::learntoption,
                  "The learnt weights of the linear probability estimator.\n");
    declareOption(ol, "weights_selected", &VariableSelectionWithDirectedGradientDescent::weights_selected, OptionBase::learntoption,
                  "The vector that identifies the non-zero weights.\n");
    declareOption(ol, "selected_variables", &VariableSelectionWithDirectedGradientDescent::selected_variables, OptionBase::learntoption,
                  "The vector with the selected variables in the order of their selection.\n");
    inherited::declareOptions(ol);
}

void VariableSelectionWithDirectedGradientDescent::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(input_weights, copies);
    deepCopyField(weights_selected, copies);
    deepCopyField(selected_variables, copies);
    deepCopyField(sample_input, copies);
    deepCopyField(sample_target, copies);
    deepCopyField(sample_weight, copies);
    deepCopyField(sample_output, copies);
    deepCopyField(sample_cost, copies);
    deepCopyField(train_criterion, copies);
    deepCopyField(weights_gradient, copies);
    deepCopyField(sum_of_abs_gradient, copies);
}

void VariableSelectionWithDirectedGradientDescent::build()
{
    inherited::build();
    build_();
}

void VariableSelectionWithDirectedGradientDescent::build_()
{
}

// train //
void VariableSelectionWithDirectedGradientDescent::train()
{
    if (!train_set)
        PLERROR("VariableSelectionWithDirectedGradientDescent: no training set specified, cannot train");
    if (stage == 0) {
        // Initialize stuff before training.
        length = train_set->length();
        width = train_set->width();
        if (length < 1)
            PLERROR("VariableSelectionWithDirectedGradientDescent: the training set must contain at least one sample, got %d", length);
        inputsize = train_set->inputsize();
        targetsize = train_set->targetsize();
        weightsize = train_set->weightsize();
        if (inputsize < 1)
            PLERROR("VariableSelectionWithDirectedGradientDescent: expected inputsize greater than 0, got %d", inputsize);
        if (targetsize <= 0)
            PLERROR("In VariableSelectionWithDirectedGradientDescent::train - The targetsize (%d) must be >= 1", targetsize);
        if (weightsize != 0)
            PLERROR("VariableSelectionWithDirectedGradientDescent: expected weightsize to be 0, got %d", weightsize);
        input_weights.resize(targetsize, inputsize + 1);
        weights_selected.resize(inputsize + 1);
        weights_gradient.resize(targetsize, inputsize + 1);
        sample_input.resize(inputsize);
        sample_target.resize(1);
        sample_output.resize(1);
        sample_cost.resize(1);
        train_criterion.resize(targetsize);
        sum_of_abs_gradient.resize(inputsize);
    }

    input_weights.fill(0);
    weights_selected.fill(false);
    if (report_progress)
    {
        pb = new ProgressBar("VariableSelectionWithDirectedGradientDescent : train stages: ", nstages);
    }
/*
  We loop through the data for the specified maximum number of stages.
*/
    for (; stage < nstages; stage++)
    {
        weights_gradient.fill(0);
/*
  We compute the train criterion for this stage and compute the weight gradient.
*/
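/*
  Spelled out, the per-sample cost accumulated below is the logistic negative
  log-likelihood. With the target t mapped to {-1,+1}, x the input row, w the
  weights and b the bias (stored in input_weights(i, inputsize)):

      n7  = w.x + b
      n8  = t * n7
      n9  = sigmoid(n8) = 1 / (1 + exp(-n8))
      n10 = -log(n9)                 (added to train_criterion[i])

  By the chain rule,

      dn10/dn9 = -1 / n9
      dn9/dn8  = n9 * (1 - n9) = n9 / (1 + exp(n8))
      dn10/dn7 = (dn10/dn9) * (dn9/dn8) * t = -t / (1 + exp(n8))

  so the gradient with respect to weight col is n7_gradient * x[col] and the
  gradient with respect to the bias is n7_gradient, which is exactly what the
  loop accumulates into weights_gradient.
*/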
        train_criterion.fill(0);
        for (int i = 0; i < targetsize; i++) {
            for (row = 0; row < length; row++)
            {
                real target = train_set(row, inputsize + i);
                if (is_missing(target))
                    continue;
                n7_value = input_weights(i, inputsize);
                for (col = 0; col < inputsize; col++)
                {
                    n7_value += input_weights(i, col) * train_set(row, col);
                }
#ifdef BOUNDCHECK
                if (!fast_exact_is_equal(target, 0.0) &&
                    !fast_exact_is_equal(target, 1.0))
                    PLERROR("In VariableSelectionWithDirectedGradientDescent::train - The target should be 0 or 1");
#endif
                if (fast_exact_is_equal(target, 0)) target = -1; // We work with -1 and 1 instead.
                n8_value = target * n7_value;
                n9_value = 1.0 / (1.0 + exp(-n8_value));
                n10_value = -pl_log(n9_value);
                train_criterion[i] += n10_value;
                n10_gradient = 1.0;
                n9_gradient = n10_gradient * (-1.0 / n9_value);
                n8_gradient = n9_gradient * n9_value * 1.0 / (1.0 + exp(n8_value));
                n7_gradient = n8_gradient * target;
                for (col = 0; col < inputsize; col++)
                {
                    weights_gradient(i, col) += n7_gradient * train_set(row, col);
                }
                weights_gradient(i, inputsize) += n7_gradient;
            }
        }
/*
  We perform this stage's weight update according to the directed gradient descent algorithm.
*/
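/*
  Concretely, one stage of the update below does the following: the bias of
  every predictor is always updated; the per-variable gradients are summed in
  absolute value across all targets; the single variable with the largest
  total |gradient| is appended to selected_variables if it is not already
  selected; and only the weights of selected variables are moved, so all
  predictors share the same growing set of variables.
*/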
        sum_of_abs_gradient.fill(0);
        for (int i = 0; i < targetsize; i++) {
            // Bias update.
            input_weights(i, inputsize) -= learning_rate * weights_gradient(i, inputsize);
            // Compute sum of |gradient|.
            for (int j = 0; j < inputsize; j++)
                sum_of_abs_gradient[j] += fabs(weights_gradient(i,j));
        }
        weights_gradient_max = 0.0;
        for (col = 0; col < inputsize; col++)
        {
            if (sum_of_abs_gradient[col] > weights_gradient_max)
            {
                weights_gradient_max = sum_of_abs_gradient[col];
                weights_gradient_max_col = col;
            }
        }
        if (!weights_selected[weights_gradient_max_col])
        {
            selected_variables.append(weights_gradient_max_col);
            verbose("VariableSelectionWithDirectedGradientDescent: variable " + tostring(weights_gradient_max_col)
                    + " was added.", 2);
        }
        weights_selected[weights_gradient_max_col] = true;
        // Weights update (only the weights of selected variables are moved).
        for (int i = 0; i < targetsize; i++)
            for (col = 0; col < inputsize; col++)
                input_weights(i, col) -= learning_rate * weights_gradient(i, col) * real(weights_selected[col]);
        verbose("VariableSelectionWithDirectedGradientDescent: After " + tostring(stage) + " stages, the train criterion is: "
                + tostring(train_criterion), 3);
        if (report_progress) pb->update(stage);
    }
    if (report_progress)
    {
        pb = new ProgressBar("VariableSelectionWithDirectedGradientDescent : computing the training statistics: ", length);
    }
    train_stats->forget();
    for (row = 0; row < length; row++)
    {   
        // computeCostsFromOutputs expects the original 0/1 targets and maps
        // them to -1/1 itself, so the targets are passed through unchanged.
        train_set->getExample(row, sample_input, sample_target, sample_weight);
        computeOutput(sample_input, sample_output);
        computeCostsFromOutputs(sample_input, sample_output, sample_target, sample_cost);
        train_stats->update(sample_cost);
        if (report_progress) pb->update(row);
    }
    train_stats->finalize();
    verbose("VariableSelectionWithDirectedGradientDescent: After " + tostring(stage) + " stages, average error is: "
            + tostring(train_stats->getMean()), 1);
}

void VariableSelectionWithDirectedGradientDescent::verbose(string the_msg, int the_level)
{
    if (verbosity >= the_level)
        pout << the_msg << endl;
}

void VariableSelectionWithDirectedGradientDescent::forget()
{
    inputsize = -1; // For safety reasons.
    selected_variables.resize(0);
    stage = 0;
}

int VariableSelectionWithDirectedGradientDescent::outputsize() const
{
    return targetsize;
}

// getTrainCostNames //
TVec<string> VariableSelectionWithDirectedGradientDescent::getTrainCostNames() const
{
    TVec<string> return_msg(1);
    return_msg[0] = "negloglikelihood";
    return return_msg;
}

// getTestCostNames //
TVec<string> VariableSelectionWithDirectedGradientDescent::getTestCostNames() const
{ 
    return getTrainCostNames();
}

// computeOutput //
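// The output is the raw linear activation w.x + b of each predictor (the
// quantity passed through the sigmoid in the cost); sigmoid(outputv[i]) would
// be the estimated probability that target i equals 1.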
void VariableSelectionWithDirectedGradientDescent::computeOutput(const Vec& inputv, Vec& outputv) const
{
    outputv.resize(targetsize);
    for (int i = 0; i < targetsize; i++) {
        outputv[i] = input_weights(i, inputsize);
        for (int col = 0; col < inputsize; col++)
        {
            outputv[i] += input_weights(i, col) * inputv[col];
        }
    }
}

// computeCostsFromOutputs //
void VariableSelectionWithDirectedGradientDescent::computeCostsFromOutputs(const Vec& inputv, const Vec& outputv, 
                                                                           const Vec& targetv, Vec& costsv) const
{
    if (is_missing(outputv[0]))
    {
        costsv[0] = MISSING_VALUE;
        return;
    }
    // Note that the "2 * target - 1" operation is only here to transform a 0/1
    // target into -1/1.
    costsv[0] = -pl_log(1.0 / (1.0 + exp(-(2.0 * targetv[0] - 1) * outputv[0])));
}

// setTrainingSet //
void VariableSelectionWithDirectedGradientDescent::setTrainingSet(VMat training_set, bool call_forget) {
    targetsize = training_set->targetsize();
    inherited::setTrainingSet(training_set, call_forget);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :