// -*- C++ -*-

// SVMClassificationTorch.cc
//
// Copyright (C) 2005 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id: SVMClassificationTorch.cc 6351 2006-10-25 19:05:45Z chapados $
   ******************************************************* */

// Authors: Olivier Delalleau

#include "SVMClassificationTorch.h"
#include <plearn_torch/TTorchDataSetFromVMat.h>
#include <plearn_torch/TTorchKernelFromKernel.h>
#include <plearn_torch/TMachine.h>
#include <plearn_torch/TSVMClassification.h>
#include <plearn_torch/TQCTrainer.h>
#include <plearn_torch/TTrainer.h>

#include <plearn/vmat/ProcessingVMatrix.h>

namespace PLearn {
using namespace std;

////////////////////////////
// SVMClassificationTorch //
////////////////////////////
SVMClassificationTorch::SVMClassificationTorch()
    : C(100),
      cache_size(50),
      iter_msg(1000),
      output_the_class(true),
      target_01(false)
{}

PLEARN_IMPLEMENT_OBJECT(SVMClassificationTorch,
    "SVM classification using the Torch library",
    "Do not do anything that needs this object to be deep-copied, because it\n"
    "is not possible yet.\n"
    "Only binary classification is currently supported. By default, the\n"
    "target should be -1 or 1. You can use 0 and 1 by setting the option\n"
    "'target_01' to 1.\n"
);

////////////////////
// declareOptions //
////////////////////
void SVMClassificationTorch::declareOptions(OptionList& ol)
{
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    // Build options.

    declareOption(ol, "kernel", &SVMClassificationTorch::kernel, OptionBase::buildoption,
                  "The kernel we use.");

    declareOption(ol, "C", &SVMClassificationTorch::C, OptionBase::buildoption,
                  "Trade-off margin / error.");

    declareOption(ol, "output_the_class", &SVMClassificationTorch::output_the_class, OptionBase::buildoption,
                  "If set to 1, the output will be the class, otherwise it will be a real value.");

    declareOption(ol, "target_01", &SVMClassificationTorch::target_01,
                  OptionBase::buildoption,
                  "If set to 1, the target in the training set will be assumed to be\n"
                  "either 0 or 1 (instead of the default -1 / 1).");

    declareOption(ol, "iter_msg", &SVMClassificationTorch::iter_msg, OptionBase::buildoption,
                  "Number of iterations between each message.");

    declareOption(ol, "cache_size", &SVMClassificationTorch::cache_size, OptionBase::buildoption,
                  "Cache size (in Mb).");

    // Learnt options.

    // declareOption(ol, "myoption", &SVMClassificationTorch::myoption, OptionBase::learntoption,
    //               "Help text describing this option");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);

    // Redeclare some parent's options.
    redeclareOption(ol, "machine", &SVMClassificationTorch::machine, OptionBase::learntoption,
                    "Constructed at build time and saved to store learnt parameters.");

    redeclareOption(ol, "trainer", &SVMClassificationTorch::trainer, OptionBase::nosave,
                    "Constructed at build time (there is no need to save it).");

}

///////////
// build //
///////////
void SVMClassificationTorch::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void SVMClassificationTorch::build_()
{
    // Build machine.
    if (!machine)
        machine = new TSVMClassification();
    PP<TSVMClassification> svm_class = (TSVMClassification*) (TMachine*) machine;
    svm_class->C = this->C;
    svm_class->cache_size = this->cache_size;
    svm_class->kernel = new TTorchKernelFromKernel(this->kernel);
    svm_class->build();
    // Build trainer.
    if (!trainer)
        trainer = new TQCTrainer();
    PP<TQCTrainer> qc_trainer = (TQCTrainer*) (TTrainer*) this->trainer;
    qc_trainer->qc_machine = (TQCMachine*) (TMachine*) this->machine;
    qc_trainer->iter_msg = this->iter_msg;
    qc_trainer->build();
    // We can now build the TorchLearner.
    inherited::build();
}

//////////////////////////////
// computeCostsFromOutputs //
//////////////////////////////
void SVMClassificationTorch::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                                     const Vec& target, Vec& costs) const
{
    // No cost computed.
    // For safety, we check we are trying to do binary classification with -1
    // and 1, or 0 and 1 when the 'target_01' option is set.
    PLASSERT( target.length() == 1 &&
              ((target_01  && (target[0] == 1 || target[0] == 0)) ||
               (!target_01 && (target[0] == 1 || target[0] == -1))) );
    PLASSERT( output.length() == 1 );
    costs.resize(1);
    real sig_output = sigmoid(output[0]);
    if (fast_exact_is_equal(sig_output, 0))
        sig_output = REAL_EPSILON;
    if (fast_exact_is_equal(target[0], 1))
        costs[0] = sig_output;
    else
        costs[0] = - sig_output;
}

///////////////////
// computeOutput //
///////////////////
void SVMClassificationTorch::computeOutput(const Vec& input, Vec& output) const
{
    inherited::computeOutput(input, output);
    if (output_the_class)
        for (int i = 0; i < output.length(); i++)
            output[i] = output[i] > 0 ? 1 : target_01 ? 0 : -1;
}

#if 0
////////////
// forget //
////////////
void SVMClassificationTorch::forget()
{
}
#endif

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> SVMClassificationTorch::getTestCostNames() const
{
    static TVec<string> costs;
    if (costs.isEmpty())
        costs.append("lift_output");
    return costs;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> SVMClassificationTorch::getTrainCostNames() const
{
    return inherited::getTrainCostNames();
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void SVMClassificationTorch::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### Remove this line when you have fully implemented this method.
    PLERROR("SVMClassificationTorch::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}

////////////////////
// setTrainingSet //
////////////////////
void SVMClassificationTorch::setTrainingSet(VMat training_set, bool call_forget) {
    VMat the_train_set = training_set;
    if (target_01) {
        // Create processing program.
        int target_col = training_set->inputsize();
        PLASSERT( target_col > 0 );
        string prog = "[%0:%" + tostring(target_col - 1) + "] %" +
            tostring(target_col) + " 1 == 1 -1 ifelse :target";
        if (training_set->weightsize() > 0) {
            int weight_col = training_set->inputsize() +
                             training_set->targetsize();
            prog += " [%" + tostring(weight_col) + ":%" +
                tostring(weight_col + training_set->weightsize() - 1) + "]";
        }
        the_train_set = new ProcessingVMatrix(the_train_set, prog);
    }
    kernel->setDataForKernelMatrix(the_train_set);
    inherited::setTrainingSet(the_train_set, call_forget);
}

#if 0
////////////////
// outputsize //
////////////////
int SVMClassificationTorch::outputsize() const
{
    // Compute and return the size of this learner's output, (which typically
    // may depend on its inputsize(), targetsize() and set options).
}

///////////
// train //
///////////
void SVMClassificationTorch::train()
{
    // The role of the train method is to bring the learner up to stage==nstages,
    // updating train_stats with training costs measured on-line in the process.

    /* TYPICAL CODE:

    static Vec input  // static so we don't reallocate/deallocate memory each time...
    static Vec target // (but be careful that static means shared!)
    input.resize(inputsize())    // the train_set's inputsize()
    target.resize(targetsize())  // the train_set's targetsize()
    real weight

    if(!train_stats)  // make a default stats collector, in case there's none
        train_stats = new VecStatsCollector()

    if(nstages<stage) // asking to revert to a previous stage!
        forget()      // reset the learner to stage=0

    while(stage<nstages)
    {
        // clear statistics of previous epoch
        train_stats->forget()

        //... train for 1 stage, and update train_stats,
        // using train_set->getSample(input, target, weight)
        // and train_stats->update(train_costs)

        ++stage
        train_stats->finalize() // finalize statistics for this epoch
    }
    */

    if (stage >= nstages) {
        PLWARNING("In SVMClassificationTorch::train - Learner has already been trained, skipping training");
        return;
    }
}
#endif

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
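
Usage sketch (not part of the original file): the minimal driver below illustrates how the build options declared in declareOptions() might be set from C++ and how training and prediction are driven through setTrainingSet(), train() and computeOutput(). It assumes PLearn's MemoryVMatrix and GaussianKernel classes, public access to the option members, and the generic build()/train() workflow inherited from TorchLearner; the toy dataset and option values are purely illustrative.

// Hypothetical usage sketch for SVMClassificationTorch (assumptions noted above).
#include "SVMClassificationTorch.h"
#include <plearn/ker/GaussianKernel.h>      // assumed available kernel class
#include <plearn/vmat/MemoryVMatrix.h>      // assumed in-memory VMat class

using namespace PLearn;

int main()
{
    // Toy dataset: 2 input columns, 1 target column in {0, 1}, no weight column.
    Mat data(4, 3);
    data(0, 0) = 0; data(0, 1) = 0; data(0, 2) = 0;
    data(1, 0) = 0; data(1, 1) = 1; data(1, 2) = 1;
    data(2, 0) = 1; data(2, 1) = 0; data(2, 2) = 1;
    data(3, 0) = 1; data(3, 1) = 1; data(3, 2) = 0;
    VMat train_set = new MemoryVMatrix(data);
    train_set->defineSizes(2, 1, 0);         // inputsize, targetsize, weightsize

    // Configure the learner through its build options.
    PP<SVMClassificationTorch> svm = new SVMClassificationTorch();
    svm->kernel = new GaussianKernel();      // 'kernel' build option
    svm->C = 10;                             // trade-off margin / error
    svm->target_01 = true;                   // targets are 0/1 instead of -1/1
    svm->output_the_class = true;            // output the class, not a real score
    svm->build();

    // Train: with target_01 set, setTrainingSet() remaps 0/1 targets to -1/1
    // through the ProcessingVMatrix built in the .cc above.
    svm->setTrainingSet(train_set, true);
    svm->train();

    // Predict on one input; since output_the_class and target_01 are set,
    // output[0] will be 0 or 1.
    Vec input(2), output(1);
    input[0] = 1; input[1] = 0;
    svm->computeOutput(input, output);
    return 0;
}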