PLearn 0.1
// -*- C++ -*-

// RegressorFromDistribution.cc
//
// Copyright (C) 2006 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Olivier Delalleau

#include "RegressorFromDistribution.h"
#include <plearn/vmat/ForwardVMatrix.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RegressorFromDistribution,
    "Regression from a distribution trained on both the input and target data",
    "This regressor outputs E[target|input], where this expectation is\n"
    "computed by an underlying conditional distribution, trained with a\n"
    "predictor part corresponding to the input part, and the predicted part\n"
    "corresponding to the target part.\n"
);

///////////////////////////////
// RegressorFromDistribution //
///////////////////////////////
RegressorFromDistribution::RegressorFromDistribution()
{}

void RegressorFromDistribution::declareOptions(OptionList& ol)
{
    declareOption(ol, "distribution", &RegressorFromDistribution::distribution,
                  OptionBase::buildoption,
        "The underlying (conditional) distribution. Its predictor and\n"
        "predicted sizes will be set automatically.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void RegressorFromDistribution::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void RegressorFromDistribution::build_()
{
}

//////////////////////////////////
// makeDeepCopyFromShallowCopy //
//////////////////////////////////
void RegressorFromDistribution::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(distribution, copies);
}

////////////////
// outputsize //
////////////////
int RegressorFromDistribution::outputsize() const
{
    if (!distribution)
        return -1;
    else
        return distribution->getNPredicted();
}

////////////
// forget //
////////////
void RegressorFromDistribution::forget()
{
    inherited::forget();
    if (distribution)
        distribution->forget();
}

///////////
// train //
///////////
void RegressorFromDistribution::train()
{
    if (!distribution)
        PLERROR("In RegressorFromDistribution::train - You need to specify a "
                "distribution before calling the train() method");
    distribution->train();
}

///////////////////
// computeOutput //
///////////////////
void RegressorFromDistribution::computeOutput(const Vec& input, Vec& output) const
{
    PLASSERT( distribution );
    distribution->setPredictor(input);
    distribution->expectation(output);
}

//////////////////////////////
// computeCostsFromOutputs //
//////////////////////////////
void RegressorFromDistribution::computeCostsFromOutputs(
        const Vec& input, const Vec& output,
        const Vec& target, Vec& costs) const
{
    costs.resize(1);
    costs[0] = powdistance(target, output);
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> RegressorFromDistribution::getTestCostNames() const
{
    static TVec<string> test_costs;
    if (test_costs.isEmpty())
        test_costs.append("mse");
    return test_costs;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> RegressorFromDistribution::getTrainCostNames() const
{
    static TVec<string> no_train_cost;
    return no_train_cost;
}

////////////////////
// setTrainingSet //
////////////////////
void RegressorFromDistribution::setTrainingSet(VMat training_set,
                                               bool call_forget)
{
    inherited::setTrainingSet(training_set, call_forget);
    PLASSERT( training_set->inputsize() >= 0 &&
              training_set->targetsize() >= 1 );
    // Create a dataset whose input part is the concatenation of the input and
    // target parts of the training set.
    VMat all_input_trainset = new ForwardVMatrix(training_set);
    all_input_trainset->build();
    all_input_trainset->defineSizes(training_set->inputsize() +
            training_set->targetsize(), 0, training_set->weightsize(),
            training_set->extrasize());
    PLASSERT( distribution );
    // Note that 'call_forget' is set to false in the following call, because
    // if it was true, then distribution->forget() would have already been
    // called in this->forget().
    distribution->setTrainingSet(all_input_trainset, false);
    // Set sizes for the underlying distribution.
    distribution->setPredictorPredictedSizes(training_set->inputsize(),
                                             training_set->targetsize());
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
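The following is a minimal usage sketch, not part of the file above. It assumes a VMat 'trainset' whose inputsize and targetsize are already defined, a conditional PDistribution 'cond_dist' supplied by the caller, and that 'distribution' is a public build option of type PP<PDistribution> (the usual PLearn convention); those names and access assumptions are hypothetical, only the methods shown in the listing above are taken as given.

    // Sketch only: wiring a RegressorFromDistribution to a conditional
    // distribution and a training set, then querying E[target|input].
    #include "RegressorFromDistribution.h"

    using namespace PLearn;

    void regression_example(VMat trainset, PP<PDistribution> cond_dist)
    {
        PP<RegressorFromDistribution> regressor = new RegressorFromDistribution();

        // Assumed public build option; its predictor/predicted sizes are
        // filled in later by setTrainingSet().
        regressor->distribution = cond_dist;
        regressor->build();

        // setTrainingSet() wraps the data so that input+target become the
        // distribution's input part; train() then delegates to the
        // distribution's own train().
        regressor->setTrainingSet(trainset, true);
        regressor->train();

        // computeOutput() sets the predictor part to 'input' and returns the
        // distribution's expectation, i.e. E[target | input].
        Vec input(trainset->inputsize());   // fill with a test point
        Vec output(regressor->outputsize());
        regressor->computeOutput(input, output);
    }

The only test cost reported by this learner is "mse", computed as powdistance(target, output) in computeCostsFromOutputs().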