// TreeDBNModule.cc
//
// Copyright (C) 2007 Vytenis Sakenas
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Vytenis Sakenas

/*! \file TreeDBNModule.cc */

#include "TreeDBNModule.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    TreeDBNModule,
    "Hierarchical deep network.",
    "Hierarchical deep network. In every layer, an RBM takes its input from\n"
    " n_parents_per_node RBMs of the layer below. All RBMs in a layer share\n"
    " weights; for example, a network with 3 layers and n_parents_per_node=2\n"
    " has 1, 2 and 4 RBMs in its top, middle and bottom layers respectively.\n"
    " Typical usage: provide an RBM module for every layer through the\n"
    " 'modules' option, possibly forward additional ports that should be\n"
    " computed, and set the flags propagate_gradient, propagate_energy_gradient\n"
    " and propagate_full_gradient as desired.\n"
    "Ports:\n"
    "\tinput, output_1 ... output_n\n"
    "where n is the number of layers.\n"
);

///////////////////
// TreeDBNModule //
///////////////////
TreeDBNModule::TreeDBNModule():
    n_parents_per_node(2),
    n_shared_parents(0),
    gradient_multiplier(1.0),
    propagate_gradient(false),
    propagate_energy_gradient(false),
    propagate_full_gradient(false)
{
}
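/*! Tree geometry sketch (illustration only, not part of the class): with
    n_layers = L and n_parents_per_node = p, the layer i levels above the
    bottom holds p^(L-1-i) weight-sharing RBMs. Note that build_() below
    computes layer_sizes with a bit shift, i.e. it hard-codes the binary
    case p = 2. A hypothetical standalone helper for the general case could
    look like:

\code
// Number of RBMs per layer, bottom (index 0) to top (index n_layers-1).
TVec<int> tree_layer_sizes(int n_layers, int n_parents_per_node)
{
    TVec<int> sizes(n_layers);
    sizes[n_layers-1] = 1;                      // a single RBM on top
    for (int i = n_layers-2; i >= 0; --i)
        sizes[i] = sizes[i+1] * n_parents_per_node;
    return sizes;                               // e.g. [4, 2, 1] for L=3, p=2
}
\endcode
*/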
////////////////////
// declareOptions //
////////////////////
void TreeDBNModule::declareOptions(OptionList& ol)
{
    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);

    declareOption(ol, "modules", &TreeDBNModule::modules,
                  OptionBase::buildoption,
                  "List of RBMModules used to build the DBN.");

    declareOption(ol, "n_parents_per_node", &TreeDBNModule::n_parents_per_node,
                  OptionBase::buildoption,
                  "How many parents each node has.");

    // Not implemented.
    //declareOption(ol, "n_shared_parents", &TreeDBNModule::n_shared_parents,
    //              OptionBase::buildoption,
    //              "Number of parents that two adjacent nodes share.");

    declareOption(ol, "propagate_gradient", &TreeDBNModule::propagate_gradient,
                  OptionBase::buildoption,
                  "Whether we propagate the gradient through the hierarchy.");

    declareOption(ol, "propagate_full_gradient", &TreeDBNModule::propagate_full_gradient,
                  OptionBase::buildoption,
                  "If propagate_gradient==true, this flag determines whether the gradient"
                  " is propagated through the full hierarchy. Otherwise propagation is"
                  " only done through the rightmost branch.");

    declareOption(ol, "propagate_energy_gradient", &TreeDBNModule::propagate_energy_gradient,
                  OptionBase::buildoption,
                  "Whether we compute and propagate the free energy gradient from the top layer.");

    // Probably not useful.
    declareOption(ol, "gradient_multiplier", &TreeDBNModule::gradient_multiplier,
                  OptionBase::buildoption,
                  "Value by which the gradient is multiplied before being propagated"
                  " down from the top layer.");

    declareOption(ol, "ports", &TreeDBNModule::ports,
                  OptionBase::buildoption,
                  "A sequence of pairs of strings, where each pair is of the form\n"
                  "\"P\":\"M.N\" with 'M' the name of an underlying module, 'N' one of\n"
                  "its ports, and 'P' the name under which the TreeDBNModule sees this\n"
                  "port. See the class help for an example. If 'P' is an empty string,\n"
                  "then the port name will be 'M.N'.");
}
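/*! Example of the 'ports' option (hypothetical names; assumes the top RBM
    module was given the name "rbm_top" and we want to expose its "energy"
    port):

\code
ports = [ "top_energy" : "rbm_top.energy" ]
\endcode

    build_() below resolves "rbm_top.energy" by matching the module name and
    forwards the port under the name "top_energy"; with an empty left-hand
    name it would be exposed as "rbm_top.energy".
*/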
////////////////////
// declareMethods //
////////////////////
void TreeDBNModule::declareMethods(RemoteMethodMap& rmm)
{
    // Insert a backpointer to remote methods; note that this is
    // different than for declareOptions().
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(
        rmm, "initSampling", &TreeDBNModule::initSampling,
        (BodyDoc("Initializes the network for sampling. This function must be called before any calls to sample().\n"),
         ArgDoc ("gibbsTop", "Number of Gibbs steps to do in the top RBM.")));

    declareMethod(
        rmm, "clearCache", &TreeDBNModule::clearCache,
        (BodyDoc("Clears all caches. Call this after changing any of the module parameters.\n")));

    declareMethod(
        rmm, "sample", &TreeDBNModule::sample,
        (BodyDoc("Samples the network. Returns a sample on the visible layer.\n"),
         ArgDoc("gibbsTop", "Number of Gibbs steps in the top layer for each sample."),
         RetDoc ("Sample.")));
}

//! Appends a port to this module's port list. 'rbm_index' is the index of
//! the underlying RBM module (-1 for none); if 'port_width' is -1, the width
//! is taken from that module's port 'port_name'.
void TreeDBNModule::appendPort(string name, int rbm_index, string port_name, int port_width = -1)
{
    port_names.append(name);
    port_rbms.append(rbm_index);

    if (rbm_index >= 0) {
        int index = modules[rbm_index]->getPortIndex(port_name);
        PLASSERT(index >= 0);
        port_index.append( index );
    }
    else
        port_index.append( -1 );

    if (port_width == -1) {
        // We need to extract the actual port size.
        port_width = modules[rbm_index]->getPortWidth(port_name);
    }

    TVec<int> sz(2, -1);
    sz[1] = port_width;
    port_sizes.appendRow(sz);
}
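/*! Usage sketch for the remote methods declared above (assumes 'dbn' points
    to a built and trained TreeDBNModule):

\code
dbn->initSampling(1000);     // burn in the top RBM's Gibbs chain
Vec v1 = dbn->sample(10);    // visible sample after 10 more Gibbs steps
Vec v2 = dbn->sample(10);    // subsequent samples continue the same chain
dbn->clearCache();           // required after changing module parameters
\endcode
*/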
////////////
// build_ //
////////////
void TreeDBNModule::build_()
{
    n_layers = modules.length();
    time = 0;

    // Fill the ports.
    port_names.clear();
    port_rbms.clear();
    port_index.clear();
    port_sizes.clear();
    appendPort("input", -1, "", modules[0]->visible_layer->size);

    layer_sizes.resize(n_layers);

    // Add an output port for every layer's RBM.
    for (int i = 1; i <= n_layers; ++i) {
        appendPort("output_" + tostring(i), i-1, "hidden.state");
        layer_sizes[i-1] = 1 << (n_layers-i);
    }

    // Add the ports that are forwarded from internal modules.
    for (int i = 0; i < ports.size(); ++i) {
        string s = ports[i].second;

        size_t dot = s.find('.');
        PLASSERT( dot != string::npos );
        string module_name = s.substr(0, dot);
        string port_name = s.substr(dot + 1);

        bool valid_redirect = false;
        for (int j = 0; j < n_layers; ++j) {
            if (modules[j]->name == module_name) {
                appendPort(ports[i].first, j, port_name);
                valid_redirect = true;
            }
        }

        PLASSERT(valid_redirect);
    }

    // Make sure the storage matrix vectors will not be resized, so that we
    // do not lose pointers into them.
    mats.resize(1000);
    mats.resize(0);
    cache_mats.resize(1000);
    cache_mats.resize(0);

    step_size.resize(n_layers);
    step_size[0] = 2;
    for (int i = 1; i < n_layers; ++i) {
        step_size[i] = n_parents_per_node * step_size[i-1];
    }

    // Prepare the arrays holding fprop and bprop data.
    bprop_data.resize(n_layers);
    fprop_data.resize(n_layers);
    bprop_data_cache.resize(n_layers); // do not cache (?)
    fprop_data_cache.resize(n_layers);

    for (int i = 0; i < n_layers; ++i) {
        int np = modules[i]->nPorts();
        bprop_data[i].resize(np);
        fprop_data[i].resize(np);
        bprop_data_cache[i].resize(np);
        fprop_data_cache[i].resize(np);
        bprop_data[i].fill((Mat*)NULL);
        fprop_data[i].fill((Mat*)NULL);
        bprop_data_cache[i].fill((Mat*)NULL);
        fprop_data_cache[i].fill((Mat*)NULL);
    }

    // Here we will hold the last full input to the lower layer. This makes
    // it possible to check whether a new input is a shifted version of the
    // previous one.
    last_full_input.resize(0);

    // Safety check.
    for (int i = 0; i < n_layers-1; ++i)
        PLASSERT(modules[i]->hidden_layer->size * n_parents_per_node == modules[i+1]->visible_layer->size);

    // Forward the random number generator to all underlying modules.
    if (random_gen) {
        cout << "Forget in build" << endl;
        for (int i = 0; i < modules.length(); i++) {
            if (!modules[i]->random_gen) {
                cout << "pass forget" << endl;
                modules[i]->random_gen = random_gen;
                modules[i]->build();
                modules[i]->forget();
            }
        }
    }
}

///////////
// build //
///////////
void TreeDBNModule::build()
{
    inherited::build();
    build_();
    Profiler::activate();
}
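/*! Minimal sketch of how fprop() and bpropAccUpdate() below are meant to be
    driven through the port interface (a hypothetical training step; with
    propagate_gradient=false only the unsupervised per-RBM updates happen):

\code
TVec<Mat*> values(dbn->nPorts(), (Mat*)NULL);
TVec<Mat*> gradients(dbn->nPorts(), (Mat*)NULL);
Mat inputs;                  // one example per row, filled by the caller
values[dbn->getPortIndex("input")] = &inputs;
dbn->fprop(values);                     // fills the internal fprop_data
dbn->bpropAccUpdate(values, gradients); // accumulates updates in each RBM
\endcode
*/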
////////////////////
// bpropAccUpdate //
////////////////////
void TreeDBNModule::bpropAccUpdate(const TVec<Mat*>& ports_value,
                                   const TVec<Mat*>& ports_gradient)
{
    PLASSERT( ports_value.length() == nPorts() && ports_gradient.length() == nPorts());

    Profiler::start("full bprop");
    if (!propagate_gradient) { // Only unsupervised learning in each module.
        for (int layer = n_layers-1; layer >= 0; layer--) {
            int n_mod_ports = modules[layer]->nPorts();

            bprop_data[layer].resize(n_mod_ports);
            bprop_data[layer].fill((Mat*)NULL);
            int mod_batch_size = fprop_data[layer][modules[layer]->getPortIndex("hidden.state")]->length();

            if (modules[layer]->reconstruction_connection != NULL) {
                bprop_data[layer][modules[layer]->getPortIndex("reconstruction_error.state")] = createMatrix(mod_batch_size, 1, mats);
                bprop_data[layer][modules[layer]->getPortIndex("reconstruction_error.state")]->fill(1);
            }

            Profiler::start("bprop");
            modules[layer]->bpropAccUpdate(fprop_data[layer], bprop_data[layer]);
            Profiler::end("bprop");
        }
    } else
    {
        if (!propagate_full_gradient) // Propagate only through the rightmost branch.
        {
            // For the top RBM we provide the energy gradient only and get the gradient on visible.
            bprop_data[n_layers - 1].resize( modules[n_layers-1]->nPorts() );
            bprop_data[n_layers - 1].fill((Mat*)NULL);

            int mod_batch_size = fprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")]->length();

            if (propagate_energy_gradient) {
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("energy")] = createMatrix(mod_batch_size, 1, mats);
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("energy")]->fill(1);
            }

            if (modules[n_layers-1]->reconstruction_connection != NULL) {
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("reconstruction_error.state")] =
                        createMatrix(mod_batch_size, 1, mats);
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("reconstruction_error.state")]->fill(1);
            }

            // Take the external gradient on the output.
            int out_grad = getPortIndex("output_"+tostring(n_layers));

            if ( ports_gradient[out_grad] == NULL || ports_gradient[out_grad]->isEmpty() ) {
                // Make the gradient zero.
                ports_gradient[out_grad] = createMatrix(mod_batch_size, modules[n_layers-1]->hidden_layer->size, mats);
                ports_gradient[out_grad]->fill(0);
                PLWARNING("Top RBM output port has no gradient information. Using 0 gradient.");
            }
            //PLASSERT(ports_gradient[out_grad] != NULL);


            bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("hidden.state")] =
                    createMatrix(mod_batch_size, ports_gradient[out_grad]->width(), mats);
            *bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("hidden.state")] << *ports_gradient[out_grad];

            // Ask for the visible gradient.
            bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")] =
                    createMatrix(0, modules[n_layers-1]->visible_layer->size, mats);

            Profiler::start("bprop");
            modules[n_layers-1]->bpropAccUpdate(fprop_data[n_layers-1], bprop_data[n_layers-1]);
            Profiler::end("bprop");


            Mat *mat = bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")];
            for (int i = 0; i < mat->length(); ++i)
                for (int j = 0; j < mat->width(); ++j)
                    (*mat)[i][j] *= gradient_multiplier;


            // Now, for every layer, take the upper layer's visible gradient
            // and pass it to the current layer's hidden.state port.
            for (int layer = n_layers-1; layer > 0; layer--) {
                int n_mod_ports = modules[layer-1]->nPorts();

                bprop_data[layer-1].resize(n_mod_ports);
                bprop_data[layer-1].fill((Mat*)NULL);

                int mod_batch_size = fprop_data[layer-1][modules[layer-1]->getPortIndex("visible")]->length();
                int width = modules[layer-1]->hidden_layer->size;


                Mat *hidden_state = createMatrix(mod_batch_size, width, mats);
                Mat *rbm_visible = bprop_data[layer][modules[layer]->getPortIndex("visible")];

                int parent_width = modules[layer-1]->hidden_layer->size;
                int minibatch_size = ports_value[getPortIndex("input")]->length();

                TVec<int> used(mod_batch_size, 0); // Ensure that we write the gradient only once (the one we need comes first).

                // Do the same thing as in fprop.
                for (int mbi = 0, index = 0; mbi < minibatch_size; ++mbi)
                {
                    if (mbi_time[mbi] < step_size[layer]) {
                        // All RBMs in the upper layer were computed.
                        for (int i = 0; i < layer_sizes[layer]; ++i)
                        {
                            // Here the parents are this layer's RBMs (where we want to write the gradient).
                            for (int parent = 0; parent < n_parents_per_node; ++parent) {
                                int row_id = mod_batch_length[layer-1][mbi] -
                                        hash(mbi_time[mbi], layer-1, 2*i + parent);
                                if (row_id < 0) {
                                    // It must be in the cache - do nothing.
                                } else {
                                    if (!used[row_id])
                                        (*hidden_state)(row_id) <<
                                                (*rbm_visible)(index).subVec(parent*parent_width, parent_width);
                                    used[row_id]++;
                                }
                            }
                            ++index;
                        }
                    } else {
                        // Only the last RBM was computed.
                        for (int parent = 0; parent < n_parents_per_node; ++parent) {
                            int row_id = mod_batch_length[layer-1][mbi] -
                                    hash(mbi_time[mbi], layer-1, 2*(layer_sizes[layer]-1) + parent);
                            if (row_id < 0) {
                                // It must be in the cache - do nothing.
                            } else {
                                if (!used[row_id])
                                    (*hidden_state)(row_id) <<
                                            (*rbm_visible)(index).subVec(parent*parent_width, parent_width);
                                used[row_id]++;
                            }
                        }
                        ++index;
                    }
                }

                // Provide the hidden gradient...
                bprop_data[layer-1][modules[layer-1]->getPortIndex("hidden.state")] = hidden_state;

                // Add the gradient that is provided externally on the output_i port.
                Mat *xgrad = ports_gradient[getPortIndex("output_"+tostring(layer))];
                if (xgrad != NULL && !xgrad->isEmpty()) {
                    //cout << "grad_flow: " << layer << " " << (*xgrad)(0)[0] << endl;
                    // The length of xgrad is <= hidden_state, so we need to sum row by row.
                    for (int mbi = 0; mbi < minibatch_size; ++mbi) {
                        (*hidden_state)(mod_batch_length[layer-1][mbi]-1) += (*xgrad)(mbi);
                    }
                }

                // ...and ask for the visible gradient.
                bprop_data[layer-1][modules[layer-1]->getPortIndex("visible")] =
                        createMatrix(0, modules[layer-1]->visible_layer->size, mats);

                if (modules[layer-1]->reconstruction_connection != NULL) {
                    bprop_data[layer-1][modules[layer-1]->getPortIndex("reconstruction_error.state")] =
                            createMatrix(mod_batch_size, 1, mats);
                    bprop_data[layer-1][modules[layer-1]->getPortIndex("reconstruction_error.state")]->fill(1);
                }

                Profiler::start("bprop");
                modules[layer-1]->bpropAccUpdate(fprop_data[layer-1], bprop_data[layer-1]);
                Profiler::end("bprop");
            } // for every layer
        } else // Propagate through the full hierarchy.
        {
            bprop_data[n_layers - 1].resize( modules[n_layers-1]->nPorts() );
            bprop_data[n_layers - 1].fill((Mat*)NULL);

            int mod_batch_size = fprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")]->length();

            if (propagate_energy_gradient) {
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("energy")] = createMatrix(mod_batch_size, 1, mats);
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("energy")]->fill(1);
            }

            if (modules[n_layers-1]->reconstruction_connection != NULL) {
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("reconstruction_error.state")] =
                        createMatrix(mod_batch_size, 1, mats);
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("reconstruction_error.state")]->fill(1);
            }

            // Take the external gradient on the output.
            int out_grad = getPortIndex("output_"+tostring(n_layers));

            if ( ports_gradient[out_grad] == NULL || ports_gradient[out_grad]->isEmpty() ) {
                // Make the gradient zero.
                ports_gradient[out_grad] = createMatrix(mod_batch_size, modules[n_layers-1]->hidden_layer->size, mats);
                ports_gradient[out_grad]->fill(0);
                PLWARNING("Top RBM output port has no gradient information. Using 0 gradient.");
            }
            //PLASSERT(ports_gradient[out_grad] != NULL);


            bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("hidden.state")] =
                    createMatrix(mod_batch_size, ports_gradient[out_grad]->width(), mats);
            *bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("hidden.state")] << *ports_gradient[out_grad];

            // Ask for the visible gradient.
            bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")] =
                    createMatrix(0, modules[n_layers-1]->visible_layer->size, mats);

            Profiler::start("bprop");
            modules[n_layers-1]->bpropAccUpdate(fprop_data[n_layers-1], bprop_data[n_layers-1]);
            Profiler::end("bprop");


            Mat *mat = bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")];
            for (int i = 0; i < mat->length(); ++i)
                for (int j = 0; j < mat->width(); ++j)
                    (*mat)[i][j] *= gradient_multiplier;

            int minibatch_size = ports_value[getPortIndex("input")]->length();

            // Now, for every layer, take the upper layer's visible gradient
            // and pass it to the current layer's hidden.state port.
            for (int layer = n_layers-1; layer > 0; layer--) {
                int n_mod_ports = modules[layer-1]->nPorts();

                bprop_data[layer-1].resize(n_mod_ports);
                bprop_data[layer-1].fill((Mat*)NULL);

                int mod_batch_size = minibatch_size*layer_sizes[layer-1];
                int width = modules[layer-1]->hidden_layer->size;

                Mat *hidden_state = createMatrix(mod_batch_size, width, mats);
                Mat *rbm_visible = bprop_data[layer][modules[layer]->getPortIndex("visible")];

                int parent_width = modules[layer-1]->hidden_layer->size;

                for (int mbi = 0, index = 0; mbi < minibatch_size; ++mbi)
                {
                    for (int i = 0; i < layer_sizes[layer-1]; ++i)
                    {
                        // Write the gradient from the parent.
                        int parent_ix = mbi*layer_sizes[layer] + i/n_parents_per_node;
                        int child_ix = i%n_parents_per_node;
                        (*hidden_state)(index++) << (*rbm_visible)(parent_ix).subVec(child_ix*parent_width, parent_width);
                    }
                }

                // Provide the hidden gradient...
                bprop_data[layer-1][modules[layer-1]->getPortIndex("hidden.state")] = hidden_state;

                // Add the gradient that is provided externally on the output_i port.
                Mat *xgrad = ports_gradient[getPortIndex("output_"+tostring(layer))];
                if (xgrad != NULL && !xgrad->isEmpty()) {
                    //cout << "grad_flow: " << layer << " " << (*xgrad)(0)[0] << endl;
                    // The length of xgrad is <= hidden_state, so we need to sum row by row.
                    for (int mbi = 0; mbi < minibatch_size; ++mbi) {
                        (*hidden_state)(mbi*layer_sizes[layer-1]+layer_sizes[layer-1]-1) += (*xgrad)(mbi);
                    }
                }

                // ...and ask for the visible gradient.
                bprop_data[layer-1][modules[layer-1]->getPortIndex("visible")] =
                        createMatrix(0, modules[layer-1]->visible_layer->size, mats);

                if (modules[layer-1]->reconstruction_connection != NULL) {
                    bprop_data[layer-1][modules[layer-1]->getPortIndex("reconstruction_error.state")] =
                            createMatrix(mod_batch_size, 1, mats);
                    bprop_data[layer-1][modules[layer-1]->getPortIndex("reconstruction_error.state")]->fill(1);
                }

                /*for (int i = 0; i < n_mod_ports; ++i) {
                    cout << i << " " << modules[layer-1]->getPorts()[i] << " ";
                    if (full_fprop_data[i])
                        cout << full_fprop_data[i]->length() << endl;
                    else
                        cout << "NULL" << endl;
                }*/

                Profiler::start("bprop");
                modules[layer-1]->bpropAccUpdate(fprop_data[layer-1], bprop_data[layer-1]);
                Profiler::end("bprop");
            } // for every layer
            //updateCache(); // no cache update, as we don't have any
        }


        // The following code would work without the need for full_fprop. However,
        // because RBMMixedLayer caches the nll during fprop and then reuses it in
        // bprop, it is not possible.
        /*{
            // For the top RBM we provide the energy gradient only and get the gradient on visible.
            bprop_data[n_layers - 1].resize( modules[n_layers-1]->nPorts() );
            bprop_data[n_layers - 1].fill((Mat*)NULL);

            int mod_batch_size = fprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")]->length();

            if (propagate_energy_gradient) {
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("energy")] = createMatrix(mod_batch_size, 1, mats);
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("energy")]->fill(1);
            }

            if (modules[n_layers-1]->reconstruction_connection != NULL) {
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("reconstruction_error.state")] = createMatrix(mod_batch_size, 1, mats);
                bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("reconstruction_error.state")]->fill(1);
            }

            // Take the external gradient on the output.
            int out_grad = getPortIndex("output_"+tostring(n_layers));

            if ( ports_gradient[out_grad] == NULL || ports_gradient[out_grad]->isEmpty() ) {
                // Make the gradient zero.
                ports_gradient[out_grad] = createMatrix(mod_batch_size, modules[n_layers-1]->hidden_layer->size, mats);
                ports_gradient[out_grad]->fill(0);
                PLWARNING("Top RBM output port has no gradient information. Using 0 gradient.");
            }
            //PLASSERT(ports_gradient[out_grad] != NULL);


            bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("hidden.state")] = createMatrix(mod_batch_size, ports_gradient[out_grad]->width(), mats);
            *bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("hidden.state")] << *ports_gradient[out_grad];

            // Ask for the visible gradient.
            bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")] = createMatrix(0, modules[n_layers-1]->visible_layer->size, mats);

            Profiler::start("bprop");
            modules[n_layers-1]->bpropAccUpdate(fprop_data[n_layers-1], bprop_data[n_layers-1]);
            Profiler::end("bprop");


            Mat *mat = bprop_data[n_layers-1][modules[n_layers-1]->getPortIndex("visible")];
            for (int i = 0; i < mat->length(); ++i)
                for (int j = 0; j < mat->width(); ++j)
                    (*mat)[i][j] *= gradient_multiplier;

            int minibatch_size = ports_value[getPortIndex("input")]->length();

            // Now, for every layer, take the upper layer's visible gradient
            // and pass it to the current layer's hidden.state port.
            for (int layer = n_layers-1; layer > 0; layer--) {
                int n_mod_ports = modules[layer-1]->nPorts();

                bprop_data[layer-1].resize(n_mod_ports);
                bprop_data[layer-1].fill((Mat*)NULL);

                int mod_batch_size = minibatch_size*layer_sizes[layer-1];
                int width = modules[layer-1]->hidden_layer->size;

                // We need to make a new fprop_data vector with the full (expanded) data.
                TVec<Mat*> full_fprop_data(n_mod_ports, (Mat*)NULL);
                for (int i = 0; i < n_mod_ports; ++i) {
                    if (fprop_data[layer-1][i] != NULL && !fprop_data[layer-1][i]->isEmpty()
                        // HACK to make it work with a hack in RBMModule when visible_activations.state is not computed
                        && (fprop_data[layer-1][i]->length() > 1 || fprop_data[layer-1][i]->width() > 1) ) {
                        full_fprop_data[i] = createMatrix(mod_batch_size, fprop_data[layer-1][i]->width(), mats);
                    }
                }

                Mat *hidden_state = createMatrix(mod_batch_size, width, mats);
                Mat *rbm_visible = bprop_data[layer][modules[layer]->getPortIndex("visible")];

                int parent_width = modules[layer-1]->hidden_layer->size;

                for (int mbi = 0, index = 0; mbi < minibatch_size; ++mbi)
                {
                    for (int i = 0; i < layer_sizes[layer-1]; ++i)
                    {
                        // Fill full_fprop_data properly.
                        int row_id = mod_batch_length[layer-1][mbi] - hash(mbi_time[mbi], layer-1, i);
                        for (int j = 0; j < n_mod_ports; ++j) {
                            if (full_fprop_data[j] != NULL) {
                                if (row_id < 0) {
                                    // Fill from the cache.
                                    PLASSERT_MSG(fprop_data_cache[layer-1][j], "Cache is NULL");
                                    int row_in_cache = fprop_data_cache[layer-1][j]->length()+row_id;
                                    PLASSERT_MSG(row_in_cache >= 0, "Cache is provided but is too small");
                                    (*full_fprop_data[j])(index) << (*fprop_data_cache[layer-1][j])(row_in_cache);
                                } else {
                                    (*full_fprop_data[j])(index) << (*fprop_data[layer-1][j])(row_id);
                                }
                            }
                        }

                        // Write the gradient from the parent.
                        int parent_ix = mbi*layer_sizes[layer] + i/n_parents_per_node;
                        int child_ix = i%n_parents_per_node;
                        (*hidden_state)(index++) << (*rbm_visible)(parent_ix).subVec(child_ix*parent_width, parent_width);
                    }
                }


                // Provide the hidden gradient...
                bprop_data[layer-1][modules[layer-1]->getPortIndex("hidden.state")] = hidden_state;

                // Add the gradient that is provided externally on the output_i port.
                Mat *xgrad = ports_gradient[getPortIndex("output_"+tostring(layer))];
                if (xgrad != NULL && !xgrad->isEmpty()) {
                    //cout << "grad_flow: " << layer << " " << (*xgrad)(0)[0] << endl;
                    // The length of xgrad is <= hidden_state, so we need to sum row by row.
                    for (int mbi = 0; mbi < minibatch_size; ++mbi) {
                        (*hidden_state)(mbi*layer_sizes[layer-1]+layer_sizes[layer-1]-1) += (*xgrad)(mbi);
                    }
                }

                // ...and ask for the visible gradient.
                bprop_data[layer-1][modules[layer-1]->getPortIndex("visible")] = createMatrix(0, modules[layer-1]->visible_layer->size, mats);

                if (modules[layer-1]->reconstruction_connection != NULL) {
                    bprop_data[layer-1][modules[layer-1]->getPortIndex("reconstruction_error.state")] = createMatrix(mod_batch_size, 1, mats);
                    bprop_data[layer-1][modules[layer-1]->getPortIndex("reconstruction_error.state")]->fill(1);
                }

                for (int i = 0; i < n_mod_ports; ++i) {
                    cout << i << " " << modules[layer-1]->getPorts()[i] << " ";
                    if (full_fprop_data[i])
                        cout << full_fprop_data[i]->length() << endl;
                    else
                        cout << "NULL" << endl;
                }

                Profiler::start("bprop");
                modules[layer-1]->bpropAccUpdate(full_fprop_data, bprop_data[layer-1]);
                Profiler::end("bprop");
            } // for every layer
            updateCache();
        }*/


    }

    //cout << "end back" << endl;
    // Ensure all required gradients have been computed.
    checkProp(ports_gradient);

    Profiler::end("full bprop");
}




//////////////////////
// bpropDoesNothing //
//////////////////////
/* THIS METHOD IS OPTIONAL
// The default implementation returns false.
bool TreeDBNModule::bpropDoesNothing()
{
}
*/

//////////////
// finalize //
//////////////
/* THIS METHOD IS OPTIONAL
void TreeDBNModule::finalize()
{
}
*/

////////////
// forget //
////////////
void TreeDBNModule::forget()
{
    cout << "Forget" << endl;
    for (int i = 0; i < n_layers; ++i)
        modules[i]->forget();
}

//! Returns true if 'b' matches 'a' shifted left by k positions
//! (the last k elements of 'b' are new and not checked).
bool TreeDBNModule::check_shift(Vec &a, Vec& b, int k)
{
    PLASSERT(a.length() == b.length());

    for (int i = k; i < a.length(); ++i) {
        if ( !fast_is_equal(a[i], b[i-k]) )
            return false;
    }

    return true;
}
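/*! Worked example for hash() below (binary tree, a layer k with
    layer_sizes[k] = 4 and step_size[k] = 2). hash(t, k, i) is the backward
    offset, counted from the end of the rows appended for layer k up to
    pseudo-time t, of the row holding node i's activations:
      - t = 1 < step_size[k]: all 4 nodes were just computed, so node i = 1
        sits layer_sizes[k] - i = 3 rows from the end.
      - t = 5, i = 2: node 2 now sees the input that the last node (i = 3)
        saw at time t - t_diff = 3, where t_diff = (layer_sizes[k]-1-i) *
        step_size[k] = 2; one row was appended at each of t = 4 and t = 5
        since then, so hash returns t_diff + 0 + 1 = 3.
    fprop() and bpropAccUpdate() subtract these offsets from mod_batch_length;
    a negative result means the row was already evicted to fprop_data_cache.
*/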
//! Returns how far back from the end of the rows stored for layer k (at
//! pseudo-time t) the hidden state of node i is located, i.e. the row index
//! is mod_batch_length[k][mbi] - hash(t, k, i).
// OK
int TreeDBNModule::hash(int t, int k, int i)
{
    if (t < step_size[k]) return layer_sizes[k] - i; // all RBMs were computed
    if (i == layer_sizes[k] - 1) return 1; // the last RBM in the layer was asked for, and it was just computed

    // Check if there was a moment when this input was fed to the last RBM in the layer.
    if ( (layer_sizes[k] - 1 - i)*step_size[k] <= t) {
        int t_diff = (layer_sizes[k] - 1 - i)*step_size[k];
        // In the first step_size[k] time steps we added layer_sizes[k] entries.
        return t_diff + max(0, step_size[k] - (t - t_diff) - 1)*(layer_sizes[k]-1) + 1;
    }

    // The only option left is that this input was fed to some intermediate RBM.
    int ix = i + t/step_size[k]; // Index of that RBM
    int t_diff = (ix - i)*step_size[k];
    return t_diff + max(0, step_size[k] - (t - t_diff) - 1)*(layer_sizes[k]-1) + layer_sizes[k] - 1 - ix + 1;
}

// Helper function that creates a matrix of the given size in the
// 'mats' vector and returns a pointer to it.
Mat* TreeDBNModule::createMatrix(int length, int width, TVec<Mat> &mats)
{
    mats.append(Mat(length, width));
    return &mats.lastElement();
}


//! Computes every RBM in every layer, without using the incremental cache.
void TreeDBNModule::full_fprop(const TVec<Mat*>& ports_value)
{
    Profiler::start("full fprop");
    mats.resize(0);

    vector<string> prts = modules[0]->getPorts();

    Mat* input = ports_value[getPortIndex("input")];
    int minibatch_size = input->length();

    mbi_time.resize(minibatch_size);
    mod_batch_length.resize(n_layers, minibatch_size);

    // Process layer by layer.
    for (int layer = 0; layer < n_layers; ++layer)
    {
        fprop_data[layer].resize(modules[layer]->nPorts());
        fprop_data[layer].fill((Mat*)NULL);

        // Count the number of rows.
        int nRows = layer_sizes[layer]*minibatch_size;

        // Prepare the matrices.
        Mat* rbm_visible = createMatrix(nRows, modules[layer]->visible_layer->size, mats);
        fprop_data[layer][modules[layer]->getPortIndex("visible")] = rbm_visible;

        // Create all .state matrices.
        for (int i = 0; i < modules[layer]->nPorts(); ++i) {
            string pname = modules[layer]->getPorts()[i];
            if ( pname.length() > 6 && ".state" == pname.substr(pname.length()-6) ) {
                if (fprop_data[layer][i] == NULL)
                    fprop_data[layer][i] = createMatrix(0, 0, mats);
            }
        }

        if (modules[layer]->reconstruction_connection == NULL) {
            fprop_data[layer][modules[layer]->getPortIndex("reconstruction_error.state")] = NULL;
            fprop_data[layer][modules[layer]->getPortIndex("visible_reconstruction.state")] = NULL;
            fprop_data[layer][modules[layer]->getPortIndex("visible_reconstruction_activations.state")] = NULL;
        }

        // Create empty matrices for the forwarded ports.
        for (int i = 0; i < nPorts(); ++i) {
            if (port_rbms[i] >= 0) {
                if (ports_value[i] != NULL && fprop_data[port_rbms[i]][port_index[i]] == NULL)
                    fprop_data[port_rbms[i]][port_index[i]] = createMatrix(0, 0, mats);
            }
        }

        // Go through the whole minibatch and fill the visible expectations.
        if (layer == 0)
        { // Handle the input layer differently.
            int visible_size = modules[layer]->visible_layer->size;

            for (int mbi = 0, index = 0; mbi < minibatch_size; ++mbi)
            {
                for (int i = 0; i < layer_sizes[layer]; ++i)
                {
                    (*rbm_visible)(index++) << (*input)(mbi).subVec(i*visible_size, visible_size);
                }
            }
        }
        else
        {
            // Take the parent layer's expectations.
            Mat *expectations = fprop_data[layer-1][modules[layer-1]->getPortIndex("hidden.state")];

            int parent_width = modules[layer-1]->hidden_layer->size;
            for (int mbi = 0, index = 0; mbi < minibatch_size; ++mbi)
            {
                // Compute all RBMs.
                for (int i = 0; i < layer_sizes[layer]; ++i)
                {
                    for (int parent = 0; parent < n_parents_per_node; ++parent) {
                        int row_id = mbi*layer_sizes[layer-1] + i*n_parents_per_node + parent;
                        (*rbm_visible)(index).subVec(parent*parent_width, parent_width) <<
                                (*expectations)(row_id);
                    }
                    ++index;
                }
            }
        }

        Profiler::start("fprop");
        //cout << "fprop: " << endl;
        //cout << (*fprop_data[layer][0]) << endl;
        //cout << "************" << endl;
        modules[layer]->fprop(fprop_data[layer]);
        Profiler::end("fprop");
    }

    time = 0;
    last_full_input.resize(input->width());
    last_full_input << (*input)(minibatch_size-1);

    // Write all required output to the provided ports ( output_i + requested ).
    //cout << "write" << endl;
    for (int i = 0; i < nPorts(); ++i) {
        Mat *mat = ports_value[i];

        if ( mat != NULL && mat->isEmpty() ) {
            // Check which layer's output should be written to the port.
            int pl = port_rbms[i];
            if (pl >= 0) {
                mat->resize(minibatch_size, fprop_data[pl][port_index[i]]->width());
                //cout << modules[pl]->getPorts()[i] << endl;
                for (int j = 0; j < minibatch_size; ++j)
                    (*mat)(j) << (*fprop_data[pl][port_index[i]])(layer_sizes[pl]*j + layer_sizes[pl]-1);
            } else
                PLERROR("Data was requested for a port, but not computed!");
        }
    }

    //cout << "redirected " << *ports_value[port_redirects[0][0].first] << endl;
    //cout << "ffprop end" << endl;
    Profiler::end("full fprop");

    //Profiler::report(cout);
}
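/*! The incremental fprop() below exploits the stream structure of the input:
    when a minibatch row is the previous row shifted by one symbol
    (check_shift()), its pseudo-time is incremented and only the rightmost
    RBM of each layer is recomputed; all other node activations are looked
    up, via hash(), in earlier rows of the current batch or in
    fprop_data_cache. Only rows that start a new sequence (pseudo-time 0)
    trigger evaluation of the whole tree, which is what full_fprop() above
    always does.
*/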
//! Incremental fprop: recomputes only what the pseudo-time requires.
void TreeDBNModule::fprop(const TVec<Mat*>& ports_value)
{
    if (propagate_gradient && propagate_full_gradient) {
        full_fprop(ports_value);
        return;
    }

    Profiler::start("full fprop");
    mats.resize(0);

    vector<string> prts = modules[0]->getPorts();
    //cout << "*********************" << endl;
    //for (int i = 0; i < prts.size(); ++i)
    //    cout << prts[i] << endl;
    //cout << "*********************" << endl;

    Mat* input = ports_value[getPortIndex("input")];
    int minibatch_size = input->length();
    int symbol_size = modules[0]->visible_layer->size/n_parents_per_node;

    mbi_time.resize(minibatch_size);
    mod_batch_length.resize(n_layers, minibatch_size);

    // Compute the pseudo-time.
    Vec v = (*input)(0), v2;
    if ( last_full_input != NULL && !last_full_input.isEmpty() && check_shift( last_full_input, v, symbol_size ) )
        mbi_time[0] = time + 1;
    else
        mbi_time[0] = 0;

    for (int mbi = 1; mbi < minibatch_size; ++mbi)
    {
        // Two cases: either it is a shifted version of the previous input,
        // or it is a new word.
        v = (*input)(mbi-1); v2 = (*input)(mbi);
        if ( check_shift( v, v2, symbol_size ) )
            mbi_time[mbi] = mbi_time[mbi-1] + 1;
        else
            mbi_time[mbi] = 0;
    }

    // Process layer by layer.
    for (int layer = 0; layer < n_layers; ++layer)
    {
        fprop_data[layer].resize(modules[layer]->nPorts());
        fprop_data[layer].fill((Mat*)NULL);

        // Count the number of rows.
        int nRows = 0;
        for (int mbi = 0; mbi < minibatch_size; ++mbi)
        {
            // We might need to compute either all RBMs or only the last one.
            if (mbi_time[mbi] < step_size[layer]) nRows += layer_sizes[layer];
            else ++nRows;
        }

        // Prepare the matrices.
        Mat* rbm_visible = createMatrix(nRows, modules[layer]->visible_layer->size, mats);
        fprop_data[layer][modules[layer]->getPortIndex("visible")] = rbm_visible;

        // Create all .state matrices.
        for (int i = 0; i < modules[layer]->nPorts(); ++i) {
            string pname = modules[layer]->getPorts()[i];
            if ( pname.length() > 6 && ".state" == pname.substr(pname.length()-6) ) {
                if (fprop_data[layer][i] == NULL)
                    fprop_data[layer][i] = createMatrix(0, 0, mats);
            }
        }

        //fprop_data[layer][modules[layer]->getPortIndex("hidden.state")] = createMatrix(0, 0, mats);
        //fprop_data[layer][modules[layer]->getPortIndex("hidden_activations.state")] = createMatrix(0, 0, mats);

        if (modules[layer]->reconstruction_connection == NULL) {
            fprop_data[layer][modules[layer]->getPortIndex("reconstruction_error.state")] = NULL;
            fprop_data[layer][modules[layer]->getPortIndex("visible_reconstruction.state")] = NULL;
            fprop_data[layer][modules[layer]->getPortIndex("visible_reconstruction_activations.state")] = NULL;
        }

        // Create empty matrices for the forwarded ports.
        for (int i = 0; i < nPorts(); ++i) {
            if (port_rbms[i] >= 0) {
                if (ports_value[i] != NULL && fprop_data[port_rbms[i]][port_index[i]] == NULL)
                    fprop_data[port_rbms[i]][port_index[i]] = createMatrix(0, 0, mats);
            }
        }

        // Go through the whole minibatch and fill the visible expectations.
        if (layer == 0)
        { // Handle the input layer differently.
            int visible_size = modules[layer]->visible_layer->size;

            for (int mbi = 0, index = 0; mbi < minibatch_size; ++mbi)
            {
                // We might need to compute either all RBMs or only the last one.
                if (mbi_time[mbi] < step_size[layer]) {
                    // Compute all RBMs.
                    for (int i = 0; i < layer_sizes[layer]; ++i)
                    {
                        (*rbm_visible)(index++) << (*input)(mbi).subVec(i*visible_size, visible_size);
                    }
                } else {
                    // Compute only the last RBM.
                    (*rbm_visible)(index++) << (*input)(mbi).subVec((layer_sizes[layer]-1)*visible_size, visible_size);
                }
                mod_batch_length[0][mbi] = index;
            }
        }
        else
        {
            // Take the parent layer's expectations.
            Mat *expectations = fprop_data[layer-1][modules[layer-1]->getPortIndex("hidden.state")];
            Mat *expectations_cache = fprop_data_cache[layer-1][modules[layer-1]->getPortIndex("hidden.state")];

            int parent_width = modules[layer-1]->hidden_layer->size;
            for (int mbi = 0, index = 0; mbi < minibatch_size; ++mbi)
            {
                // We might need to compute either all RBMs or only the last one.
                if (mbi_time[mbi] < step_size[layer]) {
                    // Compute all RBMs.
                    for (int i = 0; i < layer_sizes[layer]; ++i)
                    {
                        for (int parent = 0; parent < n_parents_per_node; ++parent) {
                            int row_id = mod_batch_length[layer-1][mbi] - hash(mbi_time[mbi], layer-1, n_parents_per_node*i + parent);
                            //cout << "RID*: " << row_id << endl;
                            if (row_id < 0) {
                                // It must be in the cache.
                                PLASSERT_MSG(expectations_cache, "Cache is NULL");
                                int row_in_cache = expectations_cache->length()+row_id;
                                PLASSERT_MSG(row_in_cache >= 0, "Cache is provided but is too small");
                                (*rbm_visible)(index).subVec(parent*parent_width, parent_width) <<
                                        (*expectations_cache)(row_in_cache);
                            } else {
                                (*rbm_visible)(index).subVec(parent*parent_width, parent_width) <<
                                        (*expectations)(row_id);
                            }
                        }
                        ++index;
                    }
                } else {
                    // Compute only the last RBM.
                    for (int parent = 0; parent < n_parents_per_node; ++parent) {
                        int row_id = mod_batch_length[layer-1][mbi] - hash(mbi_time[mbi], layer-1, n_parents_per_node*(layer_sizes[layer]-1) + parent);
                        //cout << "RID: " << row_id << endl;
                        //cout << mbi_time[mbi] << " " << mod_batch_length[mbi] << " " << hash(mbi_time[mbi], layer-1, 2*(layer_sizes[layer]-1) + parent) << " " << row_id << endl;
                        if (row_id < 0) {
                            // It must be in the cache.
                            PLASSERT_MSG(expectations_cache, "Cache is NULL");
                            int row_in_cache = expectations_cache->length()+row_id;
                            PLASSERT_MSG(row_in_cache >= 0, "Cache is provided but is too small");
                            (*rbm_visible)(index).subVec(parent*parent_width, parent_width) <<
                                    (*expectations_cache)(row_in_cache);
                        } else {
                            (*rbm_visible)(index).subVec(parent*parent_width, parent_width) <<
                                    (*expectations)(row_id);
                        }
                    }
                    ++index;
                }
                mod_batch_length[layer][mbi] = index;
            }
        }

        Profiler::start("fprop");
        //cout << "fprop: " << endl;
        //cout << (*fprop_data[layer][0]) << endl;
        //cout << "************" << endl;
        modules[layer]->fprop(fprop_data[layer]);
        Profiler::end("fprop");
    }

    time = mbi_time[minibatch_size-1];
    last_full_input.resize(input->width());
    last_full_input << (*input)(minibatch_size-1);

    // Final things: fill the cache...
    if (!propagate_gradient || !propagate_full_gradient)
        updateCache();

    // ...and write all required output to the provided ports ( output_i + requested ).
    for (int i = 0; i < nPorts(); ++i) {
        Mat *mat = ports_value[i];

        if ( mat != NULL && mat->isEmpty() ) {
            // Check which layer's output should be written to the port.
            int pl = port_rbms[i];
            if (pl >= 0) {
                mat->resize(minibatch_size, fprop_data[pl][port_index[i]]->width());
                for (int j = 0; j < minibatch_size; ++j)
                    (*mat)(j) << (*fprop_data[pl][port_index[i]])(mod_batch_length[pl][j] - 1);
            } else
                PLERROR("Data was requested for a port, but not computed!");
        }
    }

    //cout << "redirected " << *ports_value[port_redirects[0][0].first] << endl;

    Profiler::end("full fprop");

    //Profiler::report(cout);
}

//! Updates the fprop cache with the most recent activations.
void TreeDBNModule::updateCache()
{
    //cache_mats.resize(0);
    for (int i = 0; i < n_layers; ++i) {
        int n_ports = modules[i]->nPorts();
        for (int j = 0; j < n_ports; ++j) {

            if (fprop_data[i][j] != NULL && !fprop_data[i][j]->isEmpty()) {
                // Take the last rows.
                int max_rows = layer_sizes[0]*n_parents_per_node; // the maximum we could need
                if (fprop_data[i][j]->length() > max_rows) {
                    //cout << "full cache" << endl;
                    // Copy a submatrix.
                    if (fprop_data_cache[i][j] == NULL)
                        fprop_data_cache[i][j] = createMatrix(max_rows, fprop_data[i][j]->width(), cache_mats);
                    else
                        fprop_data_cache[i][j]->resize(max_rows, fprop_data[i][j]->width());
                    *fprop_data_cache[i][j] << fprop_data[i][j]->subMatRows(fprop_data[i][j]->length()-max_rows, max_rows);
                } else {
                    if (fprop_data_cache[i][j] == NULL) { // We have no cache yet: copy everything.
                        //cout << "first cache " << i << " " << j << endl;
                        fprop_data_cache[i][j] = createMatrix(fprop_data[i][j]->length(), fprop_data[i][j]->width(), cache_mats);
                        *fprop_data_cache[i][j] << *fprop_data[i][j];
                    } else {
                        //cout << "part cache" << endl;
                        // We had something already; check how many rows we have to keep.
                        int rows_reuse = min(max_rows - fprop_data[i][j]->length(), fprop_data_cache[i][j]->length());
                        Mat tmp(rows_reuse, fprop_data[i][j]->width());
                        tmp << fprop_data_cache[i][j]->subMatRows(fprop_data_cache[i][j]->length() - rows_reuse, rows_reuse);
                        fprop_data_cache[i][j]->resize(rows_reuse + fprop_data[i][j]->length(), fprop_data[i][j]->width());
                        fprop_data_cache[i][j]->subMatRows(0, rows_reuse) << tmp;
                        fprop_data_cache[i][j]->subMatRows(rows_reuse, fprop_data[i][j]->length()) << *fprop_data[i][j];
                    }
                }
            }

            // TODO: if we stop computing fprop_data for some port, the cache should be deleted (?)
        }
    }
}

//! Clears all fprop/bprop caches and resets the pseudo-time.
void TreeDBNModule::clearCache()
{
    time = 0;
    cache_mats.resize(0);
    for (int i = 0; i < n_layers; ++i) {
        int n_ports = modules[i]->nPorts();
        for (int j = 0; j < n_ports; ++j) {
            fprop_data_cache[i][j] = NULL;
            bprop_data_cache[i][j] = NULL;
        }
    }
}

//! Initializes the network for sampling by burning in the top RBM's
//! Gibbs chain, starting from a random hidden configuration.
void TreeDBNModule::initSampling(int gibbsTop)
{
    modules[n_layers-1]->min_n_Gibbs_steps = gibbsTop;

    Mat hidden(1, modules[n_layers-1]->hidden_layer->size);

    for (int i = 0; i < modules[n_layers-1]->hidden_layer->size; ++i)
    {
        hidden[0][i] = rand() & 1;
    }

    Mat exp;
    TVec<Mat*> fprop_data(modules[n_layers-1]->nPorts(), (Mat*)NULL);

    fprop_data[modules[n_layers-1]->getPortIndex("hidden_sample")] = &hidden;
    fprop_data[modules[n_layers-1]->getPortIndex("visible_sample")] = &exp;

    // Initialize with a random sample.
    modules[n_layers-1]->fprop(fprop_data);

    // Run the chain for min_n_Gibbs_steps.
    fprop_data.fill((Mat*)NULL);
    exp.resize(0,0);
    fprop_data[modules[n_layers-1]->getPortIndex("visible_sample")] = &exp;
    modules[n_layers-1]->fprop(fprop_data);
}


//! Draws one sample from the model and returns its visible-layer representation.
Vec TreeDBNModule::sample(int gibbsTop)
{
    modules[n_layers-1]->n_Gibbs_steps_per_generated_sample = gibbsTop;

    // Sample the visible expectations from the top layer RBM.
    TVec<Mat> samples(n_layers);

    TVec<Mat*> fprop_data(modules[n_layers-1]->nPorts(), (Mat*)NULL);

    fprop_data[modules[n_layers-1]->getPortIndex("visible_sample")] = &samples[n_layers-1];

    modules[n_layers-1]->fprop(fprop_data);

    // Propagate the expectations down the network.
    for (int layer = n_layers-2; layer >= 0; --layer)
    {
        // Fill the hidden sample for this layer's RBMs.
        int width = modules[layer]->hidden_layer->size;
        Mat hidden_sample(layer_sizes[layer], width);
        for (int i = 0; i < layer_sizes[layer]; ++i)
        {
            hidden_sample(i) << samples[layer+1](i/n_parents_per_node).subVec((i%n_parents_per_node)*width, width);
        }

        TVec<Mat*> fp_data(modules[layer]->nPorts(), (Mat*)NULL);
        //fp_data[modules[layer]->getPortIndex("visible_reconstruction.state")] = &samples[layer];
        //fp_data[modules[layer]->getPortIndex("hidden.state")] = &hidden_sample;
        fp_data[modules[layer]->getPortIndex("visible_sample")] = &samples[layer];
        fp_data[modules[layer]->getPortIndex("hidden_sample")] = &hidden_sample;

        modules[layer]->fprop(fp_data);
    }

    Vec sample(samples[0].size());
    for (int i = 0; i < samples[0].length(); ++i)
        sample.subVec(i*samples[0].width(), samples[0].width()) << samples[0](i);

    return sample;
}

//////////////////
// getPortIndex //
//////////////////
/* Optional
int TreeDBNModule::getPortIndex(const string& port)
{}
*/

//////////////
// getPorts //
//////////////
const TVec<string>& TreeDBNModule::getPorts() {
    return port_names;
}

//////////////////
// getPortSizes //
//////////////////
/* Optional
const TMat<int>& TreeDBNModule::getPortSizes() {
}
*/

///////////////////////////////
// makeDeepCopyFromShallowCopy //
///////////////////////////////
void TreeDBNModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    deepCopyField(modules, copies);

    // ### Remove this line when you have fully implemented this method.
    //PLERROR("TreeDBNModule::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}

/////////////////////
// setLearningRate //
/////////////////////
/* OPTIONAL
// The default implementation raises a warning and does not do anything.
void TreeDBNModule::setLearningRate(real dynamic_learning_rate)
{
}
*/


} // end of namespace PLearn


/*
  Local Variables:
    mode:c++
    c-basic-offset:4
    c-file-style:"stroustrup"
    c-file-offsets:((innamespace . 0)(inline-open . 0))
    indent-tabs-mode:nil
    fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :