// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: WeightedLogGaussian.cc 2052 2004-07-19 22:31:11Z Dan Popovici $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "WeightedLogGaussian.h"
#include <plearn/var/Var_utils.h>
#include "Molecule.h"
//#include "linearalign.h"
#include "icpalign.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(WeightedLogGaussian,
                        "Variable that computes log P(X | C_k) for the MoleculeTemplateLearner",
                        "-1/2 \\sum_{t,t'}w_{tt'}.....\n");

WeightedLogGaussian::WeightedLogGaussian(bool the_training_mode, int the_class_label,
                                         Var input_index, Var mu, Var sigma,
                                         MoleculeTemplate the_template)
    : inherited(input_index & mu & sigma, 1, 1)
{
    build_();
    class_label = the_class_label;
    current_template = the_template;
    training_mode = the_training_mode;
}

void WeightedLogGaussian::build()
{
    inherited::build();
    build_();
}

void WeightedLogGaussian::build_()
{
}

void WeightedLogGaussian::recomputeSize(int& l, int& w) const
{
    if (varray.size()) {
        l = 1;
        w = 1;
    } else
        l = w = 0;
}

void WeightedLogGaussian::fprop()
{
    real ret = 0.0;
    int p = mu()->width();

    if (!training_mode) { // in test mode, read the molecule in from its file
        string filename = test_set->getValString(0, input_index()->value[0]);
        molecule = Molecule::readMolecule(filename);
        molecule->vrml_file = filename + ".vrml";
    }

    // Align the molecule against the current template; the alignment weights
    // are returned in W_lp (length = molecule points, width = template points).
    ::align(molecule->vrml_file, molecule->chem,
            current_template->vrml_file, current_template->chem, W_lp);
    int n = W_lp.width();
    int m = W_lp.length();

    Mat input = molecule->chem;

    // Weighted squared-error term of the log-likelihood.
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < m; ++j)
            for (int k = 0; k < p; ++k)
                ret += W_lp[j][i] * square(input[j][k] - mu()->matValue[i][k])
                                  / square(sigma()->matValue[i][k]);

    ret *= -0.5;

    // Normalization term: subtract the log of each standard deviation.
    for (int i = 0; i < n; ++i)
        for (int k = 0; k < p; ++k)
            ret -= pl_log(sigma()->matValue[i][k]);

    if (isnan(ret))
        PLERROR("WeightedLogGaussian::fprop: result is NaN");

    valuedata[0] = ret;
}

inline double cube(double x)
{
    return x * x * x;
}

void WeightedLogGaussian::bprop()
{
    int n = mu()->length();
    int p = mu()->width();
    int m = W_lp.length();
    Mat input = molecule->chem;

    for (int i = 0; i < n; ++i) {
        for (int k = 0; k < p; ++k) {
            // Gradient of the -log(sigma) normalization term.
            sigma()->matGradient[i][k] -= gradientdata[0] / sigma()->matValue[i][k];
            for (int j = 0; j < m; ++j) {
                // Gradients of the weighted squared-error term w.r.t. mu and sigma.
                mu()->matGradient[i][k] += gradientdata[0] * W_lp[j][i]
                    * (input[j][k] - mu()->matValue[i][k]) / square(sigma()->matValue[i][k]);
                sigma()->matGradient[i][k] += gradientdata[0] * W_lp[j][i]
                    * square(input[j][k] - mu()->matValue[i][k]) / cube(sigma()->matValue[i][k]);
            }
        }
    }
}

void WeightedLogGaussian::symbolicBprop()
{
    // Not implemented.
}

} // end of namespace PLearn
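For reference, here is a sketch of the quantity that the fprop/bprop pair above appears to compute, read directly off the loops; the index names (i over template points, j over molecule points, k over feature columns, as in the code) and the matrix notation are mine, not the original author's. With x the molecule's chem matrix, w the alignment weights stored in W_lp, and mu, sigma the template parameters, the value written to valuedata[0] is

\text{valuedata}[0] \;=\; -\frac{1}{2} \sum_{i,j,k} w_{ji}\, \frac{\bigl(x_{jk} - \mu_{ik}\bigr)^2}{\sigma_{ik}^2} \;-\; \sum_{i,k} \log \sigma_{ik}

and the partial derivatives accumulated by bprop (each scaled by the incoming gradient gradientdata[0]) are

\frac{\partial}{\partial \mu_{ik}} = \sum_{j} w_{ji}\, \frac{x_{jk} - \mu_{ik}}{\sigma_{ik}^2},
\qquad
\frac{\partial}{\partial \sigma_{ik}} = -\frac{1}{\sigma_{ik}} + \sum_{j} w_{ji}\, \frac{\bigl(x_{jk} - \mu_{ik}\bigr)^2}{\sigma_{ik}^3},

which match the usual derivatives of a diagonal Gaussian log-likelihood, weighted term by term by the alignment weights.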