// -*- C++ -*-

// PLearnerOutputVMatrix.cc
//
// Copyright (C) 2003 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: PLearnerOutputVMatrix.cc 8617 2008-03-03 17:45:54Z nouiz $
 ******************************************************* */

// Authors: Yoshua Bengio

#include "PLearnerOutputVMatrix.h"

namespace PLearn {
using namespace std;


PLearnerOutputVMatrix::PLearnerOutputVMatrix(bool call_build_)
    : inherited(call_build_),
      put_raw_input(false),
      put_non_input(true),
      train_learners(false),
      compute_output_once(false)
    /* ### Initialize all fields to their default value */
{
    if( call_build_ )
        build_();
}

PLearnerOutputVMatrix::PLearnerOutputVMatrix(VMat source_,
                                             TVec< PP<PLearner> > learners_,
                                             bool put_raw_input_,
                                             bool train_learners_,
                                             bool compute_output_once_,
                                             bool put_non_input_,
                                             bool call_build_)
    : inherited(source_, call_build_),
      learners(learners_),
      put_raw_input(put_raw_input_),
      put_non_input(put_non_input_),
      train_learners(train_learners_),
      compute_output_once(compute_output_once_)
{
    if( call_build_ )
        build_();
}

PLearnerOutputVMatrix::PLearnerOutputVMatrix(VMat source_,
                                             PP<PLearner> learner,
                                             bool put_raw_input_,
                                             bool train_learners_,
                                             bool compute_output_once_,
                                             bool put_non_input_,
                                             bool call_build_)
    : inherited(source_, call_build_),
      put_raw_input(put_raw_input_),
      put_non_input(put_non_input_),
      train_learners(train_learners_),
      compute_output_once(compute_output_once_)
{
    learners.resize(1);
    learners[0] = learner;
    if( call_build_ )
        build_();
}

PLEARN_IMPLEMENT_OBJECT(
    PLearnerOutputVMatrix,
    "Use a PLearner to transform the input part of a source data set",
    "The input part of this VMatrix is obtained by applying the computeOutput\n"
    "method of one or more PLearners to the input part of a source data set.\n"
    "The other columns of the source data set are copied as is.\n"
    "Optionally, the raw input can also be copied, always in the input part\n"
    "of the new VMatrix. The order of the elements of a new row is as follows:\n"
    " - the outputs of the learners (concatenated), computed on the input part\n"
    "   of the source data,\n"
    " - optionally, the raw input part of the source data,\n"
    " - optionally, all the non-input columns of the source data.\n"
    "\n"
    "When the learners have to be trained, a different dataset can be used\n"
    "for the training and for the output, through the 'data_train' option.\n");
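// A minimal usage sketch (kept in a comment so it does not affect this file):
// it shows one way to wrap a source VMat with a single learner through the
// convenience constructor above. 'my_source' and 'my_learner' are
// placeholders, not names defined in PLearn.
//
//     VMat outputs = new PLearnerOutputVMatrix(
//         my_source,   // VMat whose input part feeds the learner
//         my_learner,  // PP<PLearner> whose computeOutput fills the new input
//         false,       // put_raw_input: do not append the raw input columns
//         true,        // train_learners: train on 'my_source' (or 'data_train')
//         true,        // compute_output_once: precompute all outputs in build_
//         true,        // put_non_input: keep target/weight/extra columns
//         true);       // call_build_: build the VMatrix immediately
//     Vec row(outputs->width());
//     outputs->getRow(0, row);  // learner outputs, then target/weight/extra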
void PLearnerOutputVMatrix::getNewRow(int i, const Vec& v) const
{
    int c=0;
    if (learners_need_train) {
        // We need to train the learners first.
        for (int k = 0; k < learners.length(); k++)
        {
            PP<VecStatsCollector> stats = new VecStatsCollector();
            learners[k]->setTrainStatsCollector(stats);
            learners[k]->train();
            stats->finalize();
        }
        learners_need_train = false;
    }
    source->getRow(i,row);

    if(compute_output_once) {
        // Use precomputed outputs
        for (int j=0;j<learners.length();j++)
        {
            v.subVec(c,learners[j]->outputsize())
                << complete_learners_output[j](i);
            c += learners[j]->outputsize();
        }
    }

    else {
        // Compute output for each learner; now allow each learner to have a
        // different outputsize. The variable 'learners_output' is kept for
        // backwards compatibility, but is no longer strictly necessary.
        for (int j=0;j<learners.length();j++)
        {
            int cur_outputsize = learners[j]->outputsize();
            learners_output[j].resize(cur_outputsize);
            learners[j]->computeOutput(learner_input, learners_output[j]);
            v.subVec(c, cur_outputsize) << learners_output[j];
            c += cur_outputsize;
        }
    }

    if (put_raw_input)
    {
        v.subVec(c,learner_input->length()) << learner_input;
        c+=learner_input->length();
    }
    if (put_non_input)
        v.subVec(c,non_input_part_of_source_row.length())
            << non_input_part_of_source_row;
}
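// Worked layout example (a sketch with made-up sizes, not taken from any
// dataset): suppose two learners with outputsize() 3 and 2, and a source with
// inputsize 4, targetsize 1, weightsize 0 and extrasize 0. With
// put_raw_input = true and put_non_input = true, a row filled by getNewRow is
//
//     [ learner0 out (3) | learner1 out (2) | raw input (4) | target (1) ]
//
// so that build_() below computes inputsize_ = 3 + 2 + 4 = 9, targetsize_ = 1,
// weightsize_ = 0, extrasize_ = 0 and width_ = 10.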
////////////////////
// declareOptions //
////////////////////
void PLearnerOutputVMatrix::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    declareOption(ol, "data", &PLearnerOutputVMatrix::source,
                  (OptionBase::learntoption | OptionBase::nosave),
                  "DEPRECATED - Use 'source' instead.");

    declareOption(ol, "learners", &PLearnerOutputVMatrix::learners,
                  OptionBase::buildoption,
                  "The vector of PLearners which will be applied to the"
                  " 'source' data set.");

    declareOption(ol, "put_raw_input", &PLearnerOutputVMatrix::put_raw_input,
                  OptionBase::buildoption,
                  "Whether to include in the input part of this VMatrix the"
                  " raw input part\n"
                  "of 'source'.\n");

    declareOption(ol, "put_non_input", &PLearnerOutputVMatrix::put_non_input,
                  OptionBase::buildoption,
                  "Whether to include in this VMatrix the original target and"
                  " weights.");

    declareOption(ol, "train_learners", &PLearnerOutputVMatrix::train_learners,
                  OptionBase::buildoption,
                  "If set to 1, the learners will be trained on 'source' (or"
                  " 'data_train' if present)\n"
                  "before computing the output.\n");

    declareOption(ol, "data_train", &PLearnerOutputVMatrix::data_train,
                  OptionBase::buildoption,
                  "If provided and 'train_learners' is set to 1, the learners"
                  " will be trained\n"
                  "on this dataset.\n");

    declareOption(ol, "compute_output_once",
                  &PLearnerOutputVMatrix::compute_output_once,
                  OptionBase::buildoption,
                  "If set to 1, the output of the learners will be computed"
                  " once and stored.");

    declareOption(ol, "fieldinfos_source",
                  &PLearnerOutputVMatrix::fieldinfos_source,
                  OptionBase::buildoption,
                  "If provided, the fieldnames will be copied from this VMat.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
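// Option-based configuration sketch: the same object can be set up through
// the build options declared above and then built explicitly. 'my_source'
// and 'my_learner' are placeholders, and the direct member assignments assume
// the usual PLearn convention that build options are public data members.
//
//     PP<PLearnerOutputVMatrix> vm = new PLearnerOutputVMatrix(false);
//     vm->source = my_source;           // inherited 'source' option
//     vm->learners.resize(1);
//     vm->learners[0] = my_learner;
//     vm->train_learners = true;        // train before computing outputs
//     vm->compute_output_once = true;   // precompute and cache the outputs
//     vm->build();                      // runs build_() below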
////////////
// build_ //
////////////
void PLearnerOutputVMatrix::build_()
{
    updateMtime(source);

    if (source && learners.length()>0 && learners[0])
    {
        learners_need_train = train_learners;
        row.resize(source->width());

        if (train_learners) {
            // Set the learners' training set.
            for (int i = 0; i < learners.length(); i++) {
                if (data_train)
                    learners[i]->setTrainingSet(data_train);
                else
                    learners[i]->setTrainingSet(source);
            }

            // Note that the learners will be trained only if we actually
            // call getRow() or if compute_output_once is true.
        }

        if(compute_output_once)
        {
            complete_learners_output.resize(learners.length());
            for (int i = 0; i < learners.length(); i++) {
                if(train_learners)
                {
                    PP<VecStatsCollector> stats = new VecStatsCollector();
                    learners[i]->setTrainStatsCollector(stats);
                    learners[i]->train();
                    stats->finalize();
                }
                complete_learners_output[i].resize(source->length(),
                                                   learners[i]->outputsize());
            }
            learners_need_train = false;

            Vec input_row = row.subVec(0,source->inputsize());

            for(int i=0; i<source->length();i++)
            {
                source->getRow(i,row);
                for (int j=0;j<learners.length();j++)
                {
                    Vec out_j = complete_learners_output[j](i);
                    learners[j]->computeOutput(input_row,out_j);
                }
            }
        }

        if (source->inputsize() < 0)
            PLERROR("In PLearnerOutputVMatrix::build_ - The 'source' matrix"
                    " has a negative inputsize");
        if (source->targetsize() < 0)
            PLERROR("In PLearnerOutputVMatrix::build_ - The 'source' matrix"
                    " has a negative targetsize");
        if (source->weightsize() < 0)
            PLERROR("In PLearnerOutputVMatrix::build_ - The 'source' matrix"
                    " has a negative weightsize");

        // Some further state variable initializations
        learner_input = row.subVec(0,source->inputsize());
        learner_target = row.subVec(source->inputsize(),source->targetsize());
        non_input_part_of_source_row =
            row.subVec(source->inputsize(),
                       source->width() - source->inputsize());
        learners_output.resize(learners->length());

        // Compute the total width of the VMatrix and the width of the various
        // components
        inputsize_ = 0;
        for (int i=0;i<learners->length();i++)
            inputsize_ += learners[i]->outputsize();
        if (put_raw_input)
            inputsize_ += source->inputsize();
        if (put_non_input) {
            targetsize_ = source->targetsize();
            weightsize_ = source->weightsize();
            extrasize_  = source->extrasize();
            width_ = inputsize_ + targetsize_ + weightsize_ + extrasize_;
        }
        else {
            targetsize_ = 0;
            weightsize_ = 0;
            width_ = inputsize_;
        }
        length_ = source->length();

        // Set field info.
        if (fieldinfos_source)
            setFieldInfos(fieldinfos_source->getFieldInfos());
        else
        {
            TVec<string> fieldnames;
            for(int k=0; k<learners.length(); k++)
                fieldnames.append(learners[k]->getOutputNames());
            if(put_raw_input)
                fieldnames.append(source->inputFieldNames());
            if(put_non_input)
            {
                fieldnames.append(source->targetFieldNames());
                fieldnames.append(source->weightFieldNames());
                fieldnames.append(source->extraFieldNames());
            }
            declareFieldNames(fieldnames);
        }
        /* OLD CODE
        else {
            fieldinfos.resize(width_);
            if (put_non_input &&
                source->getFieldInfos().size() >= source->inputsize()
                                                  + source->targetsize())
            {
                // We can retrieve the information for the target columns.
                for (int i = 0; i < source->targetsize(); i++)
                {
                    fieldinfos[i + this->inputsize()] =
                        source->getFieldInfos()[i + source->inputsize()];
                }
            }
        }
        */
    }
}

///////////
// build //
///////////
void PLearnerOutputVMatrix::build()
{
    inherited::build();
    build_();
}

void PLearnerOutputVMatrix::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(row, copies);
    deepCopyField(learner_input, copies);
    deepCopyField(learners_output, copies);
    deepCopyField(learner_target, copies);
    deepCopyField(non_input_part_of_source_row, copies);
    deepCopyField(complete_learners_output, copies);
    deepCopyField(data_train, copies);
    deepCopyField(learners, copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :