// -*- C++ -*-

// ConstantRegressor.cc
//
// Copyright (C) 2003 *AUTHOR(S)*
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: ConstantRegressor.cc 5660 2006-05-24 21:38:36Z saintmlx $
 ******************************************************* */

#include "ConstantRegressor.h"

namespace PLearn {
using namespace std;

ConstantRegressor::ConstantRegressor()
    : weight_decay(0.0)
{
}

PLEARN_IMPLEMENT_OBJECT(
    ConstantRegressor,
    "PLearner that outputs a constant (input-independent) vector.\n",
    "ConstantRegressor is a PLearner that outputs a constant (input-independent\n"
    "but training-data-dependent) vector. It is a regressor: during training,\n"
    "the constant vector is chosen to minimize the (possibly weighted) mean\n"
    "squared error over the training set targets. Let\n"
    "  N      = number of training examples,\n"
    "  M      = target size (= output size),\n"
    "  y_{ij} = the j-th target value of the i-th training example,\n"
    "  w_i    = weight associated with the i-th training example,\n"
    "then the j-th component of the learned vector is\n"
    "  (sum_{i=1}^N w_i * y_{ij}) / (sum_{i=1}^N w_i)\n"
    "The output can also be set manually with the 'constant_output' vector option.\n"
    "The only supported cost, for both train and test, is \"mse\".\n");
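/* A short derivation of the weighted-mean formula above (added here for
 * clarity; it is not part of the original source). For each output
 * component j, the weighted squared-error criterion is
 *
 *   J(c_j) = sum_{i=1}^N w_i * (y_{ij} - c_j)^2
 *
 * Setting the derivative to zero,
 *
 *   dJ/dc_j = -2 * sum_{i=1}^N w_i * (y_{ij} - c_j) = 0
 *   =>  c_j = (sum_{i=1}^N w_i * y_{ij}) / (sum_{i=1}^N w_i)
 *
 * (assuming sum_i w_i > 0). So taking the weighted average of the targets,
 * as train() does below, exactly minimizes the weighted "mse" cost that
 * this learner reports.
 */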
void ConstantRegressor::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### these is OptionBase::nosave.

    declareOption(ol, "weight_decay", &ConstantRegressor::weight_decay,
                  OptionBase::buildoption,
                  "Weight decay parameter. Default=0. NOT CURRENTLY TAKEN INTO ACCOUNT!");

    declareOption(ol, "constant_output", &ConstantRegressor::constant_output,
                  OptionBase::learntoption,
                  "This is the learnt parameter, the constant output. During training\n"
                  "it is set to the (possibly weighted) average of the targets.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void ConstantRegressor::build_()
{
}

// ### Nothing to add here, simply calls build_
void ConstantRegressor::build()
{
    inherited::build();
    build_();
}

void ConstantRegressor::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

int ConstantRegressor::outputsize() const
{
    return targetsize();
}

void ConstantRegressor::forget()
{
    // Since this is a one-shot learner, there is nothing to forget.
}

void ConstantRegressor::train()
{
    // The role of the train method is to bring the learner up to
    // stage==nstages, updating train_stats with training costs measured
    // on-line in the process.

    Vec input; // Not static because God knows who may be using a ConstantRegressor.
    Vec target;
    Vec train_costs;
    Vec sum_of_weighted_targets;
    real weight;
    train_costs.resize(1);
    input.resize(inputsize());    // the train_set's inputsize()
    target.resize(targetsize());  // the train_set's targetsize()
    sum_of_weighted_targets.resize(targetsize()); // the running sum of weighted targets
    constant_output.resize(targetsize());

    if(!train_stats)  // make a default stats collector, in case there's none
        train_stats = new VecStatsCollector();

    real sum_of_weights = 0;
    sum_of_weighted_targets.clear();

    int n_examples = train_set->length();
    for (int i=0; i<n_examples; i++)
    {
        train_set->getExample(i, input, target, weight);

        // Skip the observation if it has any missings... (for now; a next
        // version should only skip the components that have a missing value)
        if (target.hasMissing())
            continue;

        multiplyAdd(sum_of_weighted_targets, target, weight, sum_of_weighted_targets);
        sum_of_weights += weight;
        multiply(sum_of_weighted_targets, real(1.0/sum_of_weights), constant_output);
        train_costs[0] = weight*powdistance(constant_output, target);
        train_stats->update(train_costs);
    }
    train_stats->finalize(); // finalize statistics for this one and only epoch
}
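/* Note on train() above: it maintains the weighted mean incrementally. After
 * each example it divides the running weighted sum by the running weight
 * total, so constant_output always holds the weighted mean of the examples
 * seen so far, and the "mse" training cost is measured on-line against that
 * running estimate. A minimal standalone sketch of the same update in plain
 * C++ (illustrative only; the function name and use of std::vector are
 * assumptions for this sketch, not PLearn API):
 *
 *   #include <vector>
 *
 *   // Running weighted mean of the target vectors, as in train().
 *   // Assumes targets is non-empty and total weight stays positive.
 *   std::vector<double> runningWeightedMean(
 *       const std::vector<std::vector<double> >& targets,
 *       const std::vector<double>& weights)
 *   {
 *       std::vector<double> sum(targets[0].size(), 0.0), mean(sum);
 *       double total = 0.0;
 *       for (size_t i = 0; i < targets.size(); ++i) {
 *           total += weights[i];
 *           for (size_t j = 0; j < sum.size(); ++j) {
 *               sum[j] += weights[i] * targets[i][j];  // weighted sum
 *               mean[j] = sum[j] / total;              // running weighted mean
 *           }
 *       }
 *       return mean;
 *   }
 */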
void ConstantRegressor::computeOutput(const Vec& input, Vec& output) const
{
    // Compute the output from the input.
    output.resize(outputsize());
    output << constant_output;
}

void ConstantRegressor::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                const Vec& target, Vec& costs) const
{
    // Compute the costs from the *already* computed output.
    costs.resize(1);
    costs[0] = powdistance(output, target);
}

TVec<string> ConstantRegressor::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs.
    return getTrainCostNames();
}

TVec<string> ConstantRegressor::getTrainCostNames() const
{
    // Return the names of the objective costs that the train method computes
    // and for which it updates the VecStatsCollector train_stats.
    return TVec<string>(1, "mse");
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
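/* A minimal usage sketch (illustrative only; the setTrainingSet() call and
 * the PP<> smart-pointer idiom are assumed from the PLearner base-class
 * interface, which is not shown in this file):
 *
 *   PP<ConstantRegressor> learner = new ConstantRegressor();
 *   learner->build();
 *   learner->setTrainingSet(train_vmat); // train_vmat: a VMat with
 *                                        // input/target/weight columns
 *   learner->train();                    // constant_output <- weighted mean
 *   Vec output(learner->outputsize());
 *   learner->computeOutput(some_input, output); // same vector for any input
 */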