// -*- C++ -*-

// RegressionTreeRegisters.cc
// Copyright (c) 1998-2002 Pascal Vincent
// Copyright (C) 1999-2002 Yoshua Bengio and University of Montreal
// Copyright (c) 2002 Jean-Sebastien Senecal, Xavier Saint-Mleux, Rejean Ducharme
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* **********************************************************************************    
 * $Id: RegressionTreeRegisters.cc, v 1.0 2004/07/19 10:00:00 Bengio/Kegl/Godbout *
 * This file is part of the PLearn library.                                       *
 ********************************************************************************** */

#include "RegressionTreeRegisters.h"
#include "RegressionTreeLeave.h"
#define PL_LOG_MODULE_NAME RegressionTreeRegisters
#include <plearn/io/pl_log.h>
#include <plearn/vmat/TransposeVMatrix.h>
#include <plearn/vmat/MemoryVMatrixNoSave.h>
#include <plearn/vmat/SubVMatrix.h>
#include <plearn/io/fileutils.h>
#include <plearn/io/load_and_save.h>
#include <limits>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(RegressionTreeRegisters,
                        "Object to maintain the various registers of a regression tree",
                        "It is used first to sort the learner train set on all dimensions of the input samples.\n"
                        "It keeps matrices of row indices to navigate through the training set in ascending value order for each variable.\n"
                        "Missing values are sorted at the beginning of the column.\n"
                        "It also keeps registers of which leave each row belongs to as the tree is built.\n"
                        "It is also used to maintain the next available leave id.\n"
    );
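
// A minimal usage sketch (added for illustration, not part of the original
// file). It uses only the constructor and reinitRegisters() defined below;
// the `train_set` variable is a hypothetical VMat with inputsize()>0,
// targetsize()==1 and weightsize()<=1, as checked in build_().
#if 0
    VMat train_set = ...;
    PP<RegressionTreeRegisters> registers =
        new RegressionTreeRegisters(train_set,
                                    true,  // report_progress
                                    true,  // verbosity
                                    true,  // do_sort_rows
                                    true); // mem_tsource
    registers->reinitRegisters(); // resets next_id and re-sorts if needed
#endif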

RegressionTreeRegisters::RegressionTreeRegisters():
    report_progress(0),
    verbosity(0),
    next_id(0),
    do_sort_rows(true),
    mem_tsource(true),
    have_missing(true),
    compact_reg_leave(-1)
{
    build();
}

RegressionTreeRegisters::RegressionTreeRegisters(VMat source_,
                                                 TMat<RTR_type> tsorted_row_,
                                                 VMat tsource_,
                                                 bool report_progress_,
                                                 bool verbosity_,
                                                 bool do_sort_rows_,
                                                 bool mem_tsource_):
    report_progress(report_progress_),
    verbosity(verbosity_),
    next_id(0),
    do_sort_rows(do_sort_rows_),
    mem_tsource(mem_tsource_),
    have_missing(true),
    compact_reg_leave(-1)
{
    source = source_;
    tsource = tsource_;
    if(tsource->classname()=="MemoryVMatrixNoSave")
        tsource_mat = tsource.toMat();
    tsorted_row = tsorted_row_;
    checkMissing();
    build();
}

RegressionTreeRegisters::RegressionTreeRegisters(VMat source_,
                                                 bool report_progress_,
                                                 bool verbosity_,
                                                 bool do_sort_rows_,
                                                 bool mem_tsource_):
    report_progress(report_progress_),
    verbosity(verbosity_),
    next_id(0),
    do_sort_rows(do_sort_rows_),
    mem_tsource(mem_tsource_),
    have_missing(true),
    compact_reg_leave(-1)
{
    source = source_;
    build();
}

RegressionTreeRegisters::~RegressionTreeRegisters()
{
}

void RegressionTreeRegisters::declareOptions(OptionList& ol)
{
    declareOption(ol, "report_progress", &RegressionTreeRegisters::report_progress, OptionBase::buildoption,
                  "The indicator to report progress through a progress bar\n");
    declareOption(ol, "verbosity", &RegressionTreeRegisters::verbosity, OptionBase::buildoption,
                  "The desired level of verbosity\n");
    declareOption(ol, "tsource", &RegressionTreeRegisters::tsource,
                  OptionBase::learntoption | OptionBase::nosave,
                  "The source VMatrix transposed");

    declareOption(ol, "source", &RegressionTreeRegisters::source,
                  OptionBase::buildoption,
                  "The source VMatrix");

    declareOption(ol, "next_id", &RegressionTreeRegisters::next_id, OptionBase::learntoption,
                  "The next id for creating a new leave\n");
    declareOption(ol, "leave_register", &RegressionTreeRegisters::leave_register, OptionBase::learntoption,
                  "The vector identifying the leave to which each row belongs\n");

    declareOption(ol, "do_sort_rows", &RegressionTreeRegisters::do_sort_rows,
                  OptionBase::buildoption,
                  "Do we generate the sorted rows? Not useful if used only to test.\n");

    declareOption(ol, "mem_tsource", &RegressionTreeRegisters::mem_tsource,
                  OptionBase::buildoption,
                  "Do we put the tsource in memory? Defaults to true, as this"
                  " gives a great speed up for the training of RegressionTree.\n");

    //too big to save
    declareOption(ol, "tsorted_row", &RegressionTreeRegisters::tsorted_row, OptionBase::nosave,
                  "The matrix holding the sequence of samples in ascending value order for each dimension\n");

    inherited::declareOptions(ol);
}
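
// A hedged sketch of how the build options above might be set from a
// .plearn script, assuming the usual PLearn object specification syntax;
// AutoVMatrix and "data.amat" are illustrative, not taken from this file.
//
//     RegressionTreeRegisters(
//         source = AutoVMatrix( specification = "data.amat" );
//         report_progress = 1;
//         do_sort_rows = 1;
//         mem_tsource = 1;
//     )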

void RegressionTreeRegisters::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(leave_register, copies);
//tsource and tsorted_row should be deep copied, but currently when they are
// deep copied the copy gets modified. To save memory we don't do it.
// They are deep copied heavily by HyperLearner and HyperOptimizer.
//    deepCopyField(tsorted_row, copies);
//    deepCopyField(tsource,copies);
//no need to deep copy source as we don't reuse it after initialization
//    deepCopyField(source,copies);
}

void RegressionTreeRegisters::build()
{
    inherited::build();
    build_();
}

void RegressionTreeRegisters::build_()
{
    if(!source)
        return;
    //Check that all the examples of the train_set fit,
    //given the size of RTR_type, which limits the capacity.
    PLCHECK(source.length()>0 
            && (unsigned)source.length()
            <= std::numeric_limits<RTR_type>::max());
    PLCHECK(source->targetsize()==1);
    PLCHECK(source->weightsize()<=1);
    PLCHECK(source->inputsize()>0);

    if(!tsource){
        tsource = VMat(new TransposeVMatrix(new SubVMatrix(
                                                source, 0,0,source->length(),
                                                source->inputsize())));
        if(mem_tsource){
            PP<MemoryVMatrixNoSave> tmp = new MemoryVMatrixNoSave(tsource);
            tsource = VMat(tmp);
        }
        if(tsource->classname()=="MemoryVMatrixNoSave")
            tsource_mat = tsource.toMat();
    }
    setMetaInfoFrom(source);
    weightsize_=1;
    targetsize_=1;
    target_weight.resize(source->length());
    if(source->weightsize()<=0){
        width_++;
        for(int i=0;i<source->length();i++){
            target_weight[i].first=source->get(i,inputsize());
            target_weight[i].second=1.0 / length();
        }
    }else
        for(int i=0;i<source->length();i++){
            target_weight[i].first=source->get(i,inputsize());
            target_weight[i].second=source->get(i,inputsize()+targetsize());
        }
#if 0
    //useful to reweight the dataset so that the sum of weights == 1 or == length()
    real weights_sum=0;
    for(int i=0;i<source->length();i++){
        weights_sum+=target_weight[i].second;
    }
    pout<<weights_sum<<endl;
//    real t=length()/weights_sum;
    real t=1/weights_sum;
    for(int i=0;i<source->length();i++){
        target_weight[i].second*=t;
    }
    weights_sum=0;
    for(int i=0;i<source->length();i++){
        weights_sum+=target_weight[i].second;
    }
    pout<<weights_sum<<endl;
#endif

    leave_register.resize(length());
    sortRows();
//    compact_reg.resize(length());
}
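
// Worked example (illustrative): with source->length()==4 and no weight
// column, build_() gives every row the default weight 1.0/length(), i.e.
// target_weight[i].second == 0.25 for all i, so the weights sum to 1.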

void RegressionTreeRegisters::reinitRegisters()
{
    next_id = 0;

    //in case we don't save the sorted data
    sortRows();
}

void RegressionTreeRegisters::getAllRegisteredRowLeave(
    RTR_type_id leave_id, int col,
    TVec<RTR_type> &reg,
    TVec<pair<RTR_target_t,RTR_weight_t> > &t_w,
    Vec &value,
    PP<RegressionTreeLeave> missing_leave,
    PP<RegressionTreeLeave> left_leave,
    PP<RegressionTreeLeave> right_leave,
    TVec<RTR_type> &candidate) const
{
    PLASSERT(tsource_mat.length()==tsource.length());

    getAllRegisteredRow(leave_id,col,reg);
    t_w.resize(reg.length());
    value.resize(reg.length());
    real * p = tsource_mat[col];
    pair<RTR_target_t,RTR_weight_t> * ptw = target_weight.data();
    pair<RTR_target_t,RTR_weight_t>* ptwd = t_w.data();
    real * pv = value.data();
    RTR_type * preg = reg.data();

    //It is better to do multiple passes for memory access.

    //We do this optimization in case there are many rows with the same value
    //at the end, as with binary variables.
    //We do it here to overlap computation and memory access.
    int row_idx_end = reg.size() - 1;
    int prev_row=preg[row_idx_end];
    real prev_val=p[prev_row];
    PLASSERT(reg.size()>row_idx_end && row_idx_end>=0);
    PLASSERT(is_equal(p[prev_row],tsource(col,prev_row)));

    for( ;row_idx_end>0;row_idx_end--)
    {
        int future_row = preg[row_idx_end-8];
        __builtin_prefetch(&ptw[future_row],1,2);
        __builtin_prefetch(&p[future_row],1,2);

        int row=prev_row;
        real val=prev_val;
        prev_row = preg[row_idx_end-1];
        prev_val = p[prev_row];

        PLASSERT(reg.size()>row_idx_end && row_idx_end>0);
        PLASSERT(target_weight.size()>row && row>=0);
        PLASSERT(is_equal(p[row],tsource(col,row)));
        RTR_target_t target = ptw[row].first;
        RTR_weight_t weight = ptw[row].second;

        if (RTR_HAVE_MISSING && is_missing(val))
            missing_leave->addRow(row, target, weight);
        else if(val==prev_val)
            right_leave->addRow(row, target, weight);
        else
            break;
    }

    //We need the last data for an optimization in RegressionTreeNode (RTN).
    {
        int idx=reg.size()-1;
        PLASSERT(reg.size()>idx && idx>=0);
        int row=int(preg[idx]);
        PLASSERT(target_weight.size()>row && row>=0);
        PLASSERT(is_equal(p[row],tsource(col,row)));
        pv[idx]=p[row];
    }
    for(int row_idx = 0;row_idx<=row_idx_end;row_idx++)
    {
        int future_row = preg[row_idx+8];
        __builtin_prefetch(&ptw[future_row],1,2);
        __builtin_prefetch(&p[future_row],1,2);

        PLASSERT(reg.size()>row_idx && row_idx>=0);
        int row=int(preg[row_idx]);
        real val=p[row];
        PLASSERT(target_weight.size()>row && row>=0);
        PLASSERT(is_equal(p[row],tsource(col,row)));

        RTR_target_t target = ptw[row].first;
        RTR_weight_t weight = ptw[row].second;
        if (RTR_HAVE_MISSING && is_missing(val)){
            missing_leave->addRow(row, target, weight);
        }else {
            left_leave->addRow(row, target, weight);
            candidate.append(row);
            ptwd[row_idx].first=ptw[row].first;
            ptwd[row_idx].second=ptw[row].second;
            pv[row_idx]=val;
        }
    }
    t_w.resize(candidate.size());
    value.resize(candidate.size());
}
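
// Illustrative trace (not from the original file): with ascending column
// values [NaN, 1, 2, 2, 2] for the rows of this leave, the backward loop
// above puts the last two 2s in right_leave and stops at the 1/2 boundary;
// the forward loop then routes NaN to missing_leave and the rows valued 1
// and 2 to left_leave, appending them to the split candidates.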

void RegressionTreeRegisters::getAllRegisteredRow(RTR_type_id leave_id, int col,
                                                  TVec<RTR_type> &reg,
                                                  TVec<pair<RTR_target_t,RTR_weight_t> > &t_w,
                                                  Vec &value) const
{
    PLASSERT(tsource_mat.length()==tsource.length());

    getAllRegisteredRow(leave_id,col,reg);
    t_w.resize(reg.length());
    value.resize(reg.length());
    real * p = tsource_mat[col];
    pair<RTR_target_t,RTR_weight_t> * ptw = target_weight.data();
    pair<RTR_target_t,RTR_weight_t>* ptwd = t_w.data();
    real * pv = value.data();
    RTR_type * preg = reg.data();

    if(weightsize() <= 0){
        RTR_weight_t w = 1.0 / length();
        for(int i=0;i<reg.length();i++){
            PLASSERT(tsource->get(col, reg[i])==p[reg[i]]);
            int idx = int(preg[i]);
            ptwd[i].first = ptw[idx].first;
            ptwd[i].second = w;
            pv[i] = p[idx];
        }
    } else {
        //It is better to do multiple passes for memory access.
        for(int i=0;i<reg.length();i++){
            int idx = int(preg[i]);
            ptwd[i].first = ptw[idx].first;
            ptwd[i].second = ptw[idx].second;
        }
        for(int i=0;i<reg.length();i++){
            PLASSERT(tsource->get(col, reg[i])==p[reg[i]]);
            int idx = int(preg[i]);
            pv[i] = p[idx];
        }
    }
}

void RegressionTreeRegisters::getAllRegisteredRow(RTR_type_id leave_id,
                                                  TVec<RTR_type> &reg) const
{
    PLASSERT(tsource_mat.length()==tsource.length());

    int idx=0;
    int n=reg.length();
    RTR_type* preg = reg.data();
    RTR_type_id* pleave_register = leave_register.data();
    for(int i=0;i<length() && n> idx;i++){
        if (pleave_register[i] == leave_id){
            preg[idx++]=i;
            PLASSERT(reg[idx-1]==i);
        }
    }
    PLASSERT(idx==reg.size());
}

void RegressionTreeRegisters::getAllRegisteredRow(RTR_type_id leave_id, int col,
                                                  TVec<RTR_type> &reg) const
{
    PLASSERT(tsource_mat.length()==tsource.length());

    int idx=0;
    int n=reg.length();
    RTR_type* preg = reg.data();
    RTR_type* ptsorted_row = tsorted_row[col];
    RTR_type_id* pleave_register = leave_register.data();
    if(reg.size()==length()){
        //get the full row
        reg<<tsorted_row(col);
        idx=length();
    }else if(compact_reg.size()==0){
        for(int i=0;i<length() && n> idx;i++){
            PLASSERT(ptsorted_row[i]==tsorted_row(col, i));
            RTR_type srow = ptsorted_row[i];
            if ( pleave_register[srow] == leave_id){
                PLASSERT(leave_register[srow] == leave_id);
                PLASSERT(preg[idx]==reg[idx]);
                preg[idx++]=srow;
            }
        }
    }else if(compact_reg_leave==leave_id){
        //compact_reg is used as an optimization:
        //as it is more compact in memory than leave_register,
        //it is more memory friendly.
        for(int i=0;i<length() && n> idx;i++){
            PLASSERT(ptsorted_row[i]==tsorted_row(col, i));
            RTR_type srow = ptsorted_row[i];
            if ( compact_reg[srow] ){
                PLASSERT(leave_register[srow] == leave_id);
                PLASSERT(preg[idx]==reg[idx]);
                preg[idx++]=srow;
            }
        }
    }else{
        compact_reg.resize(0);
        compact_reg.resize(length(),false);
//        for(uint i=0;i<compact_reg.size();i++)
//            compact_reg[i]=false;
        for(int i=0;i<length() && n> idx;i++){
            PLASSERT(ptsorted_row[i]==tsorted_row(col, i));
            RTR_type srow = ptsorted_row[i];
            if ( pleave_register[srow] == leave_id){
                PLASSERT(leave_register[srow] == leave_id);
                PLASSERT(preg[idx]==reg[idx]);
                preg[idx++]=srow;
                compact_reg[srow]=true;
            }
        }
        compact_reg_leave = leave_id;
    }
    PLASSERT(idx==reg.size());
}
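
// Usage note (inferred from the code above): the first call for a given
// leave_id fills compact_reg, a per-row flag vector, and records the id in
// compact_reg_leave; later calls for the same leave on other columns then
// take the compact_reg branch, which scans a more compact array than
// leave_register and is therefore friendlier to the cache.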

tuple<real,real,int> RegressionTreeRegisters::bestSplitInRow(
    RTR_type_id leave_id, int col, TVec<RTR_type> &reg,
    PP<RegressionTreeLeave> left_leave,
    PP<RegressionTreeLeave> right_leave,
    Vec left_error, Vec right_error) const
{
    PLCHECK(!haveMissing());

    if(!tmp_leave){
        tmp_leave = ::PLearn::deepCopy(left_leave);
        tmp_vec.resize(left_leave->outputsize());
    }

    PLASSERT(tsource_mat.length()==tsource.length());
    getAllRegisteredRow(leave_id,col,reg);
    real * p = tsource_mat[col];
    pair<RTR_target_t,RTR_weight_t>* ptw = target_weight.data();
    RTR_type * preg = reg.data();

    int row_idx_end = reg.size() - 1;
    int prev_row=preg[row_idx_end];
    real prev_val=p[prev_row];
    PLASSERT(reg.size()>row_idx_end && row_idx_end>=0);
    PLASSERT(p[prev_row]==tsource(col,prev_row));
    //fill right_leave
    for( ;row_idx_end>0;row_idx_end--)
    {
        int future_row = preg[row_idx_end-8];
        __builtin_prefetch(&ptw[future_row],1,2);
        __builtin_prefetch(&p[future_row],1,2);

        int row=prev_row;
        real val=prev_val;
        prev_row = preg[row_idx_end-1];
        prev_val = p[prev_row];

        PLASSERT(reg.size()>row_idx_end && row_idx_end>0);
        PLASSERT(target_weight.size()>row && row>=0);
        PLASSERT(p[row]==tsource(col,row));
        RTR_target_t target = ptw[row].first;
        RTR_weight_t weight = ptw[row].second;

        if(val==prev_val)
            right_leave->addRow(row, target, weight);
        else
            break;
    }

    if(col==0){//do a two-pass search for the best split.
        //fill left_leave
        for(int row_idx = 0;row_idx<=row_idx_end;row_idx++)
        {
            int future_row = preg[row_idx+8];
            __builtin_prefetch(&ptw[future_row],1,2);

            PLASSERT(reg.size()>row_idx && row_idx>=0);
            int row=int(preg[row_idx]);
            PLASSERT(target_weight.size()>row && row>=0);

            RTR_target_t target = ptw[row].first;
            RTR_weight_t weight = ptw[row].second;
            left_leave->addRow(row, target, weight);
        }
        tmp_leave->initStats();
        tmp_leave->addLeave(left_leave);
        tmp_leave->addLeave(right_leave);

    }else{//do a one-pass search for the best split.

        left_leave->initStats();
        left_leave->addLeave(tmp_leave);
        left_leave->removeLeave(right_leave);

        PLASSERT(tmp_leave->length()==left_leave->length()+right_leave->length());
        PLASSERT(fast_is_equal(tmp_leave->weights_sum,left_leave->weights_sum+right_leave->weights_sum));
        PLASSERT(fast_is_equal(tmp_leave->targets_sum,left_leave->targets_sum+right_leave->targets_sum));
        PLASSERT(fast_is_equal(tmp_leave->weighted_targets_sum,left_leave->weighted_targets_sum+right_leave->weighted_targets_sum));
        PLASSERT(fast_is_equal(tmp_leave->weighted_squared_targets_sum,
                              left_leave->weighted_squared_targets_sum+right_leave->weighted_squared_targets_sum));
    }

    //find best_split
    int best_balance=INT_MAX;
    real best_feature_value = REAL_MAX;
    real best_split_error = REAL_MAX;
    if(left_leave->length()==0)
        //no split possible; both reals are REAL_MAX, returned in the same
        //(error, feature value, balance) order as the normal return below.
        return make_tuple(best_split_error, best_feature_value, best_balance);

    int iter=reg.size()-right_leave->length()-1;
    RTR_type row=preg[iter];
    real first_value=p[preg[0]];
    real next_feature=p[row];


    //next_feature!=first_value checks whether there are more split points;
    // for binary variables, or variables with few distinct values,
    // this gives a great speed up.
    for(int i=iter-1;i>=0&&next_feature!=first_value;i--)
    {
        RTR_type next_row = preg[i];
        real row_feature=next_feature;
        next_feature=p[next_row];

        PLASSERT(next_row!=row);

        // i+1 <= iter < reg.size() in this loop, so reg[i+1] is valid.
        PLASSERT(row==reg[i+1]);
        PLASSERT(next_row==reg[i]);
        PLASSERT(get(next_row, col)==next_feature);
        PLASSERT(get(row, col)==row_feature);
        PLASSERT(next_feature<=row_feature);

        int future_row = preg[i-9];
        __builtin_prefetch(&ptw[future_row],1,2);
        __builtin_prefetch(&p[future_row],1,2);


        real target=ptw[row].first;
        real weight=ptw[row].second;

        left_leave->removeRow(row, target, weight);
        right_leave->addRow(row, target, weight);

        row = next_row;
        if (next_feature < row_feature){
            left_leave->getOutputAndError(tmp_vec, left_error);
            right_leave->getOutputAndError(tmp_vec, right_error);
        }else
            continue;
        real work_error = left_error[0]
            + left_error[1] + right_error[0] + right_error[1];
        int work_balance = abs(left_leave->length() -
                               right_leave->length());
        if (fast_is_more(work_error,best_split_error)) continue;
        else if (fast_is_equal(work_error,best_split_error) &&
                 fast_is_more(work_balance,best_balance)) continue;

        best_feature_value = 0.5 * (row_feature + next_feature);
        best_split_error = work_error;
        best_balance = work_balance;
    }
    return make_tuple(best_split_error, best_feature_value, best_balance);
}
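
// Worked example (illustrative): for ascending column values [1, 1, 2, 3]
// with equal weights, the loop above walks from the high end, moving one row
// at a time from left_leave to right_leave. Errors are only evaluated at
// value boundaries, so two splits get scored: between 2 and 3 (candidate
// split value 0.5*(2+3)=2.5) and between 1 and 2 (candidate 1.5); ties on
// error are broken in favor of the more balanced split.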

void RegressionTreeRegisters::sortRows()
{
    next_id = 0;
    if(!do_sort_rows)
        return;
    if (tsorted_row.length() == inputsize() && tsorted_row.width() == length())
    {
        verbose("RegressionTreeRegisters: Sorted train set indices are present, no sort required", 3);
        return;
    }
    string f=source->getMetaDataDir()+"RTR_tsorted_row.psave";

    if(isUpToDate(f)){
        DBG_LOG<<"RegressionTreeRegisters:: Reloading the sorted source VMatrix: "<<f<<endl;
        PLearn::load(f,tsorted_row);
        checkMissing();
        return;
    }

    verbose("RegressionTreeRegisters: The train set is being sorted", 3);
    tsorted_row.resize(inputsize(), length());
    PP<ProgressBar> pb;
    if (report_progress)
    {
        pb = new ProgressBar("RegressionTreeRegisters : sorting the train set on input dimensions: ", inputsize());
    }
    for(int row=0;row<tsorted_row.length();row++)
        for(int col=0;col<tsorted_row.width(); col++)
            tsorted_row(row,col)=col;

//     for (int each_train_sample_index = 0; each_train_sample_index < length(); each_train_sample_index++)
//     {
//         sorted_row(each_train_sample_index).fill(each_train_sample_index);
//     }
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(pb)
#endif
    for (int sample_dim = 0; sample_dim < inputsize(); sample_dim++)
    {
        sortEachDim(sample_dim);
        if (report_progress) pb->update(sample_dim+1);
    }
    checkMissing();
    if (report_progress) pb->close();//in case of parallel sort.
    if(source->hasMetaDataDir()){
        DBG_LOG<<"RegressionTreeRegisters:: Saving the sorted source VMatrix: "<<f<<endl;
        PLearn::save(f,tsorted_row);
    }
}

void RegressionTreeRegisters::checkMissing()
{
    if(have_missing==false)
        return;
    bool found_missing=false;
    for(int j=0;j<inputsize()&&!found_missing;j++)
        for(int i=0;i<length()&&!found_missing;i++)
            if(is_missing(tsource(j,i)))
                found_missing=true;
    if(!found_missing)
        have_missing=false;
}

void RegressionTreeRegisters::sortEachDim(int dim)
{
    PLCHECK_MSG(tsource->classname()=="MemoryVMatrixNoSave",tsource->classname().c_str());
    Mat m = tsource.toMat();
    Vec v = m(dim);
    TVec<int> order = v.sortingPermutation(true, true);
    tsorted_row(dim)<<order;

#ifndef NDEBUG
    for(int i=0;i<length()-1;i++){
        int reg1 = tsorted_row(dim,i);
        int reg2 = tsorted_row(dim,i+1);
        real v1 = tsource(dim,reg1);
        real v2 = tsource(dim,reg2);
//check that the sort is valid.
        PLASSERT(v1<=v2 || is_missing(v2));
//check that the sort is stable.
        if(v1==v2 && reg1>reg2)
            PLWARNING("In RegressionTreeRegisters::sortEachDim(%d) - "
                      "sort is not stable. Make it stable for better optimization:"
                      " reg1=%d, reg2=%d, v1=%f, v2=%f", 
                      dim, reg1, reg2, v1, v2);
    }
#endif
}
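
// Illustrative example (not from the original file): for column values
// v = [3.0, 1.0, 2.0, 1.0], a stable ascending sorting permutation is
// [1, 3, 2, 0]: the two equal 1.0 values keep their original row order
// (row 1 before row 3), which is exactly what the PLWARNING above checks.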

void RegressionTreeRegisters::printRegisters()
{
    cout << " register:  ";
    for (int ii = 0; ii < leave_register.length(); ii++) 
        cout << " " << tostring(leave_register[ii]);
    cout << endl;
}

void RegressionTreeRegisters::verbose(string the_msg, int the_level)
{
    if (verbosity >= the_level)
        cout << the_msg << endl;
}

void RegressionTreeRegisters::getExample(int i, Vec& input, Vec& target, real& weight)
{
#ifdef BOUNDCHECK
    if(inputsize_<0)
        PLERROR("In RegressionTreeRegisters::getExample, inputsize_ not defined for this vmat");
    if(targetsize_<0)
        PLERROR("In RegressionTreeRegisters::getExample, targetsize_ not defined for this vmat");
    if(weightsize()<0)
        PLERROR("In RegressionTreeRegisters::getExample, weightsize_ not defined for this vmat");
#endif
    //Going through tsource is not thread safe, as PP is not thread safe,
    //so we use tsource_mat.copyColumnTo, which is thread safe.
    tsource_mat.copyColumnTo(i,input.data());

    target[0]=target_weight[i].first;
    weight = target_weight[i].second;
}
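
// A minimal sketch of calling getExample() (hypothetical caller code; the
// `registers` variable is assumed to be a built RegressionTreeRegisters):
#if 0
    Vec input(registers->inputsize());
    Vec target(registers->targetsize()); // targetsize()==1 here
    real weight;
    registers->getExample(i, input, target, weight);
#endif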


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :