// -*- C++ -*-

// TorchLearner.cc
//
// Copyright (C) 2005 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: TorchLearner.cc 6351 2006-10-25 19:05:45Z chapados $
 ******************************************************* */

// Authors: Olivier Delalleau

#include "TorchLearner.h"
#include <plearn_torch/TMachine.h>
#include <plearn_torch/TTorchDataSetFromVMat.h>
#include <plearn_torch/TTrainer.h>

namespace PLearn {
using namespace std;

//////////////////
// TorchLearner //
//////////////////
TorchLearner::TorchLearner()
    : outputsize_(-1)
{
    allocator = new Torch::Allocator;
    inputs = 0;
}

PLEARN_IMPLEMENT_OBJECT(TorchLearner,
    "A generic learner that can use Torch learning algorithms.",
    ""
);

////////////////////
// declareOptions //
////////////////////
void TorchLearner::declareOptions(OptionList& ol)
{
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    // Build options.

    declareOption(ol, "machine", &TorchLearner::machine, OptionBase::buildoption,
        "The Torch learning machine.");

    declareOption(ol, "trainer", &TorchLearner::trainer, OptionBase::buildoption,
        "The Torch trainer, responsible for training the machine.");

    // Learnt options.

    declareOption(ol, "outputsize", &TorchLearner::outputsize_, OptionBase::learntoption,
        "Saves the output size of this learner for faster access.");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);

    // Hide unused parent's options.

    redeclareOption(ol, "seed", &TorchLearner::seed_, OptionBase::nosave,
        "Torch learners in general will not use the PLearn seed.");

    redeclareOption(ol, "nstages", &TorchLearner::nstages, OptionBase::nosave,
        "A Torch learner is usually only trained on one stage.");

}

///////////
// build //
///////////
void TorchLearner::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void TorchLearner::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning" options have been modified.
    // ### You should assume that the parent class' build_() has already been called.
    if (machine && machine->machine->outputs)
        outputsize_ = machine->machine->outputs->frame_size;
    // Initialize the inputs sequence.
    if (inputsize_ >= 0 && (!inputs || inputs->frame_size != inputsize_)) {
        allocator->free(inputs); // Free old input sequence.
        inputs = new(allocator) Torch::Sequence(1, inputsize_);
    }
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void TorchLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{
    // No cost computed for now.
}

///////////////////
// computeOutput //
///////////////////
void TorchLearner::computeOutput(const Vec& input, Vec& output) const
{
    PLASSERT( outputsize_ >= 0 );
    output.resize(outputsize_);
    inputs->copyFrom(input.data());
    machine->forward(inputs);
    machine->machine->outputs->copyTo(output.data());
}

////////////
// forget //
////////////
void TorchLearner::forget()
{
    stage = 0;
    outputsize_ = -1;
    if (machine)
        machine->reset();
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> TorchLearner::getTestCostNames() const
{
    // No cost computed for now.
    return TVec<string>();
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> TorchLearner::getTrainCostNames() const
{
    // No cost computed for now.
    return TVec<string>();
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void TorchLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### Remove this line when you have fully implemented this method.
    PLERROR("TorchLearner::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}

////////////////
// outputsize //
////////////////
int TorchLearner::outputsize() const
{
    // Compute and return the size of this learner's output (which typically
    // may depend on its inputsize(), targetsize() and set options).
    PLASSERT( machine );
    PLASSERT( outputsize_ >= 0 || machine->machine->outputs );
    if (outputsize_ >= 0)
        return outputsize_;
    return machine->machine->outputs->frame_size;
}

////////////////////
// setTrainingSet //
////////////////////
void TorchLearner::setTrainingSet(VMat training_set, bool call_forget) {
    inherited::setTrainingSet(training_set, call_forget);
    torch_train_set = new TTorchDataSetFromVMat(training_set);
    allocator->free(inputs); // Free old input sequence.
    inputs = new(allocator) Torch::Sequence(1, training_set->inputsize());
}

///////////
// train //
///////////
void TorchLearner::train()
{
    if (stage >= nstages) {
        PLWARNING("In TorchLearner::train - Learner has already been trained, skipping training");
        return;
    }
    if (!trainer || !machine)
        PLERROR("In TorchLearner::train - You must set both the 'trainer' and 'machine' options "
                "before calling train()");
    trainer->train((TTorchDataSetFromVMat*) torch_train_set);
    if (machine->machine->outputs)
        // Update outputsize_.
        outputsize_ = machine->machine->outputs->frame_size;
    stage = 1;
}

///////////////////
// ~TorchLearner //
///////////////////
TorchLearner::~TorchLearner() {
    delete allocator;
}

} // end of namespace PLearn


/*
  Local Variables:
    mode:c++
    c-basic-offset:4
    c-file-style:"stroustrup"
    c-file-offsets:((innamespace . 0)(inline-open . 0))
    indent-tabs-mode:nil
    fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
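
// ---------------------------------------------------------------------------
// Usage sketch: a minimal, hypothetical illustration of driving a
// TorchLearner through the generic PLearner interface. The variables
// my_torch_machine, my_torch_trainer and train_vmat are placeholders; the
// concrete TMachine / TTrainer subclasses and the data VMat to use depend on
// the plearn_torch wrappers available in a given build.
//
//   PP<TorchLearner> learner = new TorchLearner();
//   learner->machine = my_torch_machine;   // a wrapped Torch machine (TMachine)
//   learner->trainer = my_torch_trainer;   // a wrapped Torch trainer (TTrainer)
//   learner->build();
//   learner->setTrainingSet(train_vmat);   // wrapped into a TTorchDataSetFromVMat
//   learner->train();                      // delegates to trainer->train(...)
//
//   Vec input(learner->inputsize()), output(learner->outputsize());
//   learner->computeOutput(input, output); // forward pass through the Torch machine
// ---------------------------------------------------------------------------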