// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: CCCostVariable.cc 6861 2007-04-09 19:04:15Z saintmlx $
 * This file is part of the PLearn library.
 ******************************************************* */
#include "CCCostVariable.h"
#include <plearn/sys/PLMPI.h>
#include <plearn/display/DisplayUtils.h>

namespace PLearn {
using namespace std;


PLEARN_IMPLEMENT_OBJECT(
    CCCostVariable,
    "Variable that computes the (mean) correlation between the errors and the value of a candidate node for Cascade Correlation",
    "Holds minus the sum, over the error outputs, of the absolute covariance\n"
    "between the candidate node's output and each error, computed over 'distr'.\n"
    "Minimizing this variable maximizes the Cascade Correlation candidate score.");

CCCostVariable::CCCostVariable(VMat the_distr, Func the_f_error, Func the_f_candidate)
    : inherited(nonInputParentsOfPath(the_f_candidate->inputs, the_f_candidate->outputs),
                1,
                1),
      distr(the_distr), f_error(the_f_error), f_candidate(the_f_candidate),
      input_value(the_distr->width()),
      input_gradient(distr->inputsize()),
      error_output_value(the_f_error->outputs[0]->size()),
      candidate_output_value(the_f_candidate->outputs[0]->size()),
      error_correlations(the_f_error->outputs[0]->size()),
      adjusted_gradient(1)
{
    build_();
}

void
CCCostVariable::build()
{
    inherited::build();
    build_();
}

void
CCCostVariable::build_()
{
    if (f_error && f_candidate && distr) {
        mean_error.resize(f_error->outputs[0]->size());
        input_value.resize(distr->inputsize() + distr->targetsize() + distr->weightsize());
        input_gradient.resize(distr->inputsize());
        if(f_error->outputs.size() != 1)
            PLERROR("In CCCostVariable: error function must have a single variable output (maybe you can vconcat the vars into a single one prior to calling sumOf, if this is really what you want)");

        if(f_error->outputs[0].width() != 1)
            PLERROR("In CCCostVariable: the error function's output must be a column vector");
        f_error->inputs.setDontBpropHere(true);

        if(f_candidate->outputs.size() != 1)
            PLERROR("In CCCostVariable: candidate node function must have a single variable output (maybe you can vconcat the vars into a single one prior to calling sumOf, if this is really what you want)");

        if(f_candidate->outputs[0].width() != 1 || f_candidate->outputs[0].length() != 1)
            PLERROR("In CCCostVariable: the candidate node function's output must be a scalar");
    }
}

void
CCCostVariable::declareOptions(OptionList &ol)
{
    declareOption(ol, "distr", &CCCostVariable::distr, OptionBase::buildoption,
                  "Dataset (input, target and possibly weight columns) over which the cost is computed.");
    declareOption(ol, "f_error", &CCCostVariable::f_error, OptionBase::buildoption,
                  "Function computing the error(s) of the current network on one sample.");
    declareOption(ol, "f_candidate", &CCCostVariable::f_candidate, OptionBase::buildoption,
                  "Function computing the candidate node's (scalar) output from the input part of one sample.");

    inherited::declareOptions(ol);
}


void CCCostVariable::recomputeSize(int& l, int& w) const
{
    w = 1;
    l = 1;
}


void CCCostVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(distr, copies);
    deepCopyField(f_error, copies);
    deepCopyField(f_candidate, copies);
}

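// Note: the notation below (n, V_p, E_{p,o}) is ours, not from the original
// code. With n = distr->length(), V_p the candidate node's output on sample p
// and E_{p,o} the o-th error output on sample p, fprop() computes
//
//     value = -(1/n) * sum_o | sum_p (V_p - mean(V)) * (E_{p,o} - mean(E_o)) |
//
// i.e. minus the sum, over the error outputs, of the absolute covariance
// between the candidate's output and that error, so that minimizing this
// variable maximizes the standard Cascade-Correlation candidate score.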
void CCCostVariable::fprop()
{
    f_error->recomputeParents();
    f_candidate->recomputeParents();
    mean_error.clear();
    mean_candidate = 0;

    // First pass: compute the means of the candidate node's output and of the
    // error for every output.
    for(int i=0; i<distr->length(); i++)
    {
        input_value.resize(distr->width());
        distr->getRow(i, input_value);
        input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
        f_error->fprop(input_value, error_output_value);
        mean_error += error_output_value;
        f_candidate->fprop(input_value.subVec(0,distr->inputsize()), candidate_output_value);
        mean_candidate += candidate_output_value[0];
    }

    mean_error /= distr->length();
    mean_candidate /= distr->length();

    // Second pass: accumulate, for each error output, the covariance between
    // the candidate node's output and that error.
    value.clear();
    error_correlations.clear();
    for(int i=0; i<distr->length(); i++)
    {
        input_value.resize(distr->width());
        distr->getRow(i, input_value);
        input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
        f_error->fprop(input_value, error_output_value);
        f_candidate->fprop(input_value.subVec(0,distr->inputsize()), candidate_output_value);
        for(int j=0; j<error_correlations.length(); j++)
            error_correlations[j] += (candidate_output_value[0]-mean_candidate)*(error_output_value[j]-mean_error[j]);
    }
    for(int j=0; j<error_correlations.length(); j++)
        value[0] -= abs(error_correlations[j]);
    value[0] /= distr->length();
}


void CCCostVariable::bprop()
{ fbprop(); }

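// Note: with the notation introduced above fprop(), the per-sample gradient
// used in fbprop() below follows directly. The errors are constants with
// respect to the candidate node (the error function's inputs are flagged with
// setDontBpropHere in build_()), and the dependence through mean(V) cancels
// because the centered errors sum to zero over the dataset, so
//
//     d(value)/d(V_p) = -(1/n) * sum_o sign(corr_o) * (E_{p,o} - mean(E_o))
//
// where corr_o = sum_p (V_p - mean(V)) * (E_{p,o} - mean(E_o)). This is the
// 'adjusted_gradient' (times the incoming gradient[0]) that fbprop()
// backpropagates through f_candidate for each sample and each error output.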
void CCCostVariable::fbprop()
{
    fprop();

    for(int i=0; i<distr->length(); i++)
    {
        input_value.resize(distr->width());
        distr->getRow(i, input_value);
        input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
        f_error->fprop(input_value, error_output_value);
        for(int j=0; j<error_correlations.length(); j++)
        {
            adjusted_gradient[0] = -1*gradient[0]*(error_output_value[j]-mean_error[j])
                * (error_correlations[j] > 0 ? 1 : -1)/distr->length();
            // Could be more efficient (do just a bprop: not implemented).
            f_candidate->fbprop(input_value.subVec(0,distr->inputsize()), candidate_output_value, input_gradient, adjusted_gradient);
        }
    }
}


void CCCostVariable::symbolicBprop()
{
    PLERROR("In CCCostVariable::symbolicBprop() : Not implemented");
}

void CCCostVariable::rfprop()
{
    PLERROR("In CCCostVariable::rfprop() : Not implemented");
}


void CCCostVariable::printInfo(bool print_gradient)
{
    fprop();

    for(int i=0; i<distr->length(); i++)
    {
        input_value.resize(distr->width());
        distr->getRow(i, input_value);
        input_value.resize(distr->inputsize()+distr->targetsize()+distr->weightsize());
        f_error->fprop(input_value, error_output_value);
        for(int j=0; j<error_correlations.length(); j++)
        {
            // Same per-sample gradient as in fbprop(), but without the 1/n factor.
            adjusted_gradient[0] = -1*gradient[0]*(error_output_value[j]-mean_error[j])
                * (error_correlations[j] > 0 ? 1 : -1);
            f_candidate->fbprop(input_value.subVec(0,distr->inputsize()), candidate_output_value, input_gradient, adjusted_gradient); // could be more efficient
        }
        f_candidate->fproppath.printInfo(print_gradient);
    }

    cout << info() << " : " << getName() << " = " << value;
    if (print_gradient) cout << " gradient=" << gradient;
    cout << endl;
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :