PLearn 0.1
|
00001 // -*- C++ -*- 00002 00003 // VBoundDBN2.cc 00004 // 00005 // Copyright (C) 2007 yoshua Bengio 00006 // 00007 // Redistribution and use in source and binary forms, with or without 00008 // modification, are permitted provided that the following conditions are met: 00009 // 00010 // 1. Redistributions of source code must retain the above copyright 00011 // notice, this list of conditions and the following disclaimer. 00012 // 00013 // 2. Redistributions in binary form must reproduce the above copyright 00014 // notice, this list of conditions and the following disclaimer in the 00015 // documentation and/or other materials provided with the distribution. 00016 // 00017 // 3. The name of the authors may not be used to endorse or promote 00018 // products derived from this software without specific prior written 00019 // permission. 00020 // 00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 00031 // 00032 // This file is part of the PLearn library. For more information on the PLearn 00033 // library, go to the PLearn Web site at www.plearn.org 00034 00035 // Authors: yoshua Bengio 00036 00040 #include <plearn_learners/online/RBMMatrixConnection.h> 00041 #include "VBoundDBN2.h" 00042 00043 namespace PLearn { 00044 using namespace std; 00045 00046 PLEARN_IMPLEMENT_OBJECT( 00047 VBoundDBN2, 00048 "2-RBM DBN trained using Hinton's new variational bound of global likelihood", 00049 "The bound that is maximized is the following:\n" 00050 " log P(x) >= -FE1(x) + E_{P1(h|x)}[ FE1(h) - FE2(h) ] - log Z2\n" 00051 "where P1 and P2 are RBMs with Pi(x) = exp(-FEi(x))/Zi.\n" 00052 ); 00053 00055 // VBoundDBN2 // 00057 VBoundDBN2::VBoundDBN2() 00058 { 00059 } 00060 00062 // declareOptions // 00064 void VBoundDBN2::declareOptions(OptionList& ol) 00065 { 00066 declareOption(ol, "rbm1", &VBoundDBN2::rbm1, 00067 OptionBase::buildoption, 00068 "First RBM, taking the DBN's input in its visible layer"); 00069 declareOption(ol, "rbm2", &VBoundDBN2::rbm2, 00070 OptionBase::buildoption, 00071 "Second RBM, producing the DBN's output and generating internal representations."); 00072 00073 // Now call the parent class' declareOptions 00074 inherited::declareOptions(ol); 00075 } 00076 00078 // build_ // 00080 void VBoundDBN2::build_() 00081 { 00082 if (random_gen) 00083 { 00084 if (rbm1 && !rbm1->random_gen) 00085 { 00086 rbm1->random_gen = random_gen; 00087 rbm1->build(); 00088 rbm1->forget(); 00089 } 00090 if (rbm2 && !rbm2->random_gen) 00091 { 00092 rbm2->random_gen = random_gen; 00093 rbm2->build(); 00094 rbm2->forget(); 00095 } 00096 } 00097 if (ports.length()==0) 00098 { 00099 ports.append("input"); // 0 00100 ports.append("bound"); // 1 00101 ports.append("nll"); // 2 00102 ports.append("sampled_h"); // 3 00103 ports.append("global_improvement"); // 4 00104 ports.append("ph_given_v"); // 5 00105 ports.append("p2ph"); // 6 00106 } 00107 } 00108 00110 // build // 00112 void VBoundDBN2::build() 00113 { 00114 inherited::build(); 00115 build_(); 00116 } 00117 00119 // bpropAccUpdate // 00121 void VBoundDBN2::bpropAccUpdate(const TVec<Mat*>& ports_value, 00122 const TVec<Mat*>& ports_gradient) 00123 { 00124 PLASSERT( ports_value.length() == nPorts() && ports_gradient.length() == nPorts()); 00125 PLASSERT( rbm1 && rbm2); 00126 00127 Mat* input = ports_value[0]; 00128 Mat* sampled_h_ = ports_value[3]; // a state if input is given 00129 Mat* global_improvement_ = ports_value[4]; // a state if input is given 00130 Mat* ph_given_v_ = ports_value[5]; // a state if input is given 00131 Mat* p2ph_ = ports_value[6]; // same story 00132 int mbs = input->length(); 00133 PLASSERT( input && sampled_h_ && global_improvement_ 00134 && ph_given_v_ && p2ph_); 00135 00136 // do CD on rbm2 00137 rbm2->setAllLearningRates(rbm2->cd_learning_rate); 00138 rbm2->hidden_layer->setExpectations(*p2ph_); 00139 rbm2->hidden_layer->generateSamples(); 00140 rbm2->sampleVisibleGivenHidden(rbm2->hidden_layer->samples); 00141 rbm2->computeHiddenActivations(rbm2->visible_layer->samples); 00142 rbm2->hidden_layer->computeExpectations(); 00143 rbm2->visible_layer->update(*sampled_h_,rbm2->visible_layer->samples); 00144 rbm2->connection->update(*sampled_h_,*p2ph_, 00145 rbm2->visible_layer->samples, 00146 rbm2->hidden_layer->getExpectations()); 00147 rbm2->hidden_layer->update(*p2ph_,rbm2->hidden_layer->getExpectations()); 00148 00149 // for now do the ugly hack, for binomial + MatrixConnection case 00150 PLASSERT(rbm1->visible_layer->classname()=="RBMBinomialLayer"); 00151 PLASSERT(rbm1->hidden_layer->classname()=="RBMBinomialLayer"); 00152 PLASSERT(rbm1->connection->classname() == "RBMMatrixConnection"); 00153 Mat& weights = ((RBMMatrixConnection*) 00154 get_pointer(rbm1->connection))->weights; 00155 static Mat delta_W; 00156 static Vec delta_hb; 00157 static Vec delta_vb1; 00158 static Vec delta_vb2; 00159 static Mat delta_h; 00160 delta_W.resize(rbm1->hidden_layer->size,rbm1->visible_layer->size); 00161 delta_hb.resize(rbm1->hidden_layer->size); 00162 delta_vb1.resize(rbm1->visible_layer->size); 00163 delta_vb2.resize(rbm1->visible_layer->size); 00164 delta_h.resize(mbs,rbm1->hidden_layer->size); 00165 00166 // reconstruct the input 00167 rbm1->computeVisibleActivations(*sampled_h_); 00168 rbm1->visible_layer->computeExpectations(); 00169 Mat reconstructed_v = rbm1->visible_layer->getExpectations(); 00170 00171 // compute RBM1 weight negative gradient 00172 // dlogbound/dWij sampling approx = (ph_given_v[i] + (h[i]-ph_given_v[i])*global_improvement)*v[j] - h[i]*reconstructed_v[j] 00173 substract(*sampled_h_, *ph_given_v_, delta_h); 00174 multiply(delta_h, delta_h, global_improvement_->toVec()); 00175 delta_h += *ph_given_v_; 00176 productScaleAcc(delta_W, delta_h, true, *input, false, 1., 0.); 00177 productScaleAcc(delta_W, *sampled_h_, true, reconstructed_v, false, -1., 1.); 00178 // update the weights 00179 multiplyAcc(weights, delta_W, rbm1->cd_learning_rate); 00180 00181 // do the biases now 00182 // dlogbound/dbi sampling approx = (ph_given_v[i] + (h[i]-ph_given_v[i])*global_improvement) - h[i] 00183 substract(delta_h, *sampled_h_, delta_h); 00184 columnSum(delta_h,delta_hb); 00185 multiplyAcc(rbm1->hidden_layer->bias,delta_hb,rbm1->cd_learning_rate); 00186 00187 // dlogbound/dji sampling approx = v[j] - reconstructed_v[j] 00188 columnSum(reconstructed_v,delta_vb1); 00189 columnSum(*input,delta_vb2); 00190 substract(delta_vb2,delta_vb1,delta_vb1); 00191 multiplyAcc(rbm1->visible_layer->bias,delta_vb1,rbm1->cd_learning_rate); 00192 00193 // Ensure all required gradients have been computed. 00194 checkProp(ports_gradient); 00195 } 00196 00198 // bpropDoesNothing // 00200 /* THIS METHOD IS OPTIONAL 00201 // the default implementation returns false 00202 bool VBoundDBN2::bpropDoesNothing() 00203 { 00204 } 00205 */ 00206 00208 // finalize // 00210 /* THIS METHOD IS OPTIONAL 00211 void VBoundDBN2::finalize() 00212 { 00213 } 00214 */ 00215 00217 // forget // 00219 void VBoundDBN2::forget() 00220 { 00221 if (rbm1 && rbm2) 00222 { 00223 rbm1->forget(); 00224 rbm2->forget(); 00225 } 00226 } 00227 00229 // fprop // 00231 void VBoundDBN2::fprop(const TVec<Mat*>& ports_value) 00232 { 00233 PLASSERT( ports_value.length() == nPorts() ); 00234 PLASSERT( rbm1 && rbm2); 00235 00236 Mat* input = ports_value[0]; 00237 Mat* bound = ports_value[1]; 00238 Mat* nll = ports_value[2]; 00239 Mat* sampled_h_ = ports_value[3]; // a state if input is given 00240 Mat* global_improvement_ = ports_value[4]; // a state if input is given 00241 Mat* ph_given_v_ = ports_value[5]; // a state if input is given 00242 Mat* p2ph_ = ports_value[6]; // same story 00243 00244 // fprop has two modes: 00245 // 1) input is given (presumably for learning, or measuring bound or nll) 00246 // 2) input is not given and we want to generate one 00247 // 00248 00249 // for learning or testing 00250 if (input && !input->isEmpty()) 00251 { 00252 int mbs=input->length(); 00253 FE1v.resize(mbs,1); 00254 FE1h.resize(mbs,1); 00255 FE2h.resize(mbs,1); 00256 Mat* sampled_h = sampled_h_?sampled_h_:&sampled_h_state; 00257 Mat* global_improvement = global_improvement_?global_improvement_:&global_improvement_state; 00258 Mat* ph_given_v = ph_given_v_?ph_given_v_:&ph_given_v_state; 00259 Mat* p2ph = p2ph_?p2ph_:&p2ph_state; 00260 sampled_h->resize(mbs,rbm1->hidden_layer->size); 00261 global_improvement->resize(mbs,1); 00262 ph_given_v->resize(mbs,rbm1->hidden_layer->size); 00263 00264 // compute things needed for everything else 00265 00266 rbm1->sampleHiddenGivenVisible(*input); 00267 *ph_given_v << rbm1->hidden_layer->getExpectations(); 00268 *sampled_h << rbm1->hidden_layer->samples; 00269 rbm1->computeFreeEnergyOfVisible(*input,FE1v,false); 00270 rbm1->computeFreeEnergyOfHidden(*sampled_h,FE1h); 00271 rbm2->computeFreeEnergyOfVisible(*sampled_h,FE2h,false); 00272 p2ph->resize(mbs,rbm2->hidden_layer->size); 00273 *p2ph << rbm2->hidden_layer->getExpectations(); 00274 substract(FE1h,FE2h,*global_improvement); 00275 00276 if (bound) // actually minus the bound, to be in same units as nll, only computed exactly during test 00277 { 00278 PLASSERT(bound->isEmpty()); 00279 bound->resize(mbs,1); 00280 00281 if (rbm2->partition_function_is_stale && !during_training) 00282 rbm2->computePartitionFunction(); 00283 *bound << FE1v; 00284 *bound -= *global_improvement; 00285 *bound += rbm2->log_partition_function; 00286 } 00287 if (nll) // exact -log P(input) = - log sum_h P2(h) P1(input|h) 00288 { 00289 PLASSERT( nll->isEmpty() ); 00290 int n_h_configurations = 1 << rbm1->hidden_layer->size; 00291 if (all_h.length()!=n_h_configurations || all_h.width()!=rbm1->hidden_layer->size) 00292 { 00293 all_h.resize(n_h_configurations,rbm1->hidden_layer->size); 00294 for (int c=0;c<n_h_configurations;c++) 00295 { 00296 int N=c; 00297 for (int i=0;i<rbm1->hidden_layer->size;i++) 00298 { 00299 all_h(c,i) = N&1; 00300 N >>= 1; 00301 } 00302 } 00303 } 00304 // compute -log P2(h) for each possible h configuration 00305 if (rbm2->partition_function_is_stale && !during_training) 00306 rbm2->computePartitionFunction(); 00307 neglogP2h.resize(n_h_configurations, 1); 00308 rbm2->computeFreeEnergyOfVisible(all_h, neglogP2h, false); 00309 neglogP2h += rbm2->log_partition_function; 00310 /* 00311 if (!during_training) { 00312 // Debug code to ensure probabilities sum to 1. 00313 real check = 0; 00314 real check2 = 0; 00315 for (int c = 0; c < n_h_configurations; c++) { 00316 check2 += exp(- neglogP2h(c, 0)); 00317 if (c == 0) 00318 check = - neglogP2h(c, 0); 00319 else 00320 check = logadd(check, - neglogP2h(c, 0)); 00321 } 00322 pout << check << endl; 00323 pout << check2 << endl; 00324 } 00325 */ 00326 rbm1->computeNegLogPVisibleGivenPHidden(*input,all_h,&neglogP2h,*nll); 00327 } 00328 } 00329 // Ensure all required ports have been computed. 00330 checkProp(ports_value); 00331 } 00332 00334 // getPortIndex // 00336 /* Optional 00337 int VBoundDBN2::getPortIndex(const string& port) 00338 {} 00339 */ 00340 00342 // getPorts // 00344 const TVec<string>& VBoundDBN2::getPorts() { 00345 return ports; 00346 } 00347 00349 // getPortSizes // 00351 const TMat<int>& VBoundDBN2::getPortSizes() { 00352 PLASSERT(rbm1 && rbm2); 00353 if (sizes.width()!=2) 00354 { 00355 sizes.resize(nPorts(),2); 00356 sizes.fill(-1); 00357 sizes(0,1)=rbm1->visible_layer->size; 00358 sizes(1,1)=1; 00359 sizes(2,1)=1; 00360 sizes(3,1)=rbm1->hidden_layer->size; 00361 sizes(4,1)=1; 00362 sizes(5,1)=rbm1->hidden_layer->size; 00363 sizes(6,1)=rbm2->hidden_layer->size; 00364 } 00365 return sizes; 00366 } 00367 00369 // makeDeepCopyFromShallowCopy // 00371 void VBoundDBN2::makeDeepCopyFromShallowCopy(CopiesMap& copies) 00372 { 00373 inherited::makeDeepCopyFromShallowCopy(copies); 00374 deepCopyField(rbm1, copies); 00375 deepCopyField(rbm2, copies); 00376 deepCopyField(FE1v, copies); 00377 deepCopyField(FE1h, copies); 00378 deepCopyField(FE2h, copies); 00379 deepCopyField(sampled_h_state, copies); 00380 deepCopyField(global_improvement_state, copies); 00381 deepCopyField(ph_given_v_state, copies); 00382 deepCopyField(p2ph_state, copies); 00383 deepCopyField(all_h, copies); 00384 deepCopyField(all_h, copies); 00385 deepCopyField(neglogP2h, copies); 00386 deepCopyField(ports, copies); 00387 } 00388 00390 // setLearningRate // 00392 /* OPTIONAL 00393 // The default implementation raises a warning and does not do anything. 00394 void VBoundDBN2::setLearningRate(real dynamic_learning_rate) 00395 { 00396 } 00397 */ 00398 00399 00400 } 00401 // end of namespace PLearn 00402 00403 00404 /* 00405 Local Variables: 00406 mode:c++ 00407 c-basic-offset:4 00408 c-file-style:"stroustrup" 00409 c-file-offsets:((innamespace . 0)(inline-open . 0)) 00410 indent-tabs-mode:nil 00411 fill-column:79 00412 End: 00413 */ 00414 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :