// -*- C++ -*-

// RBMWoodsLayer.cc
//
// Copyright (C) 2008 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#include "RBMWoodsLayer.h"
#include <plearn/math/TMat_maths.h>
#include "RBMConnection.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMWoodsLayer,
    "RBM layer with tree-structured groups of units.",
    "");

RBMWoodsLayer::RBMWoodsLayer( real the_learning_rate ) :
    inherited( the_learning_rate ),
    n_trees( 10 ),
    tree_depth( 3 ),
    use_signed_samples( false )
{
}
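// With these defaults, build_() further down sets
// size = n_trees * (2^tree_depth - 1) = 10 * (8 - 1) = 70 units in total.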
////////////////////
// generateSample //
////////////////////
void RBMWoodsLayer::generateSample()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectation_is_up_to_date, "Expectation should be computed "
                "before calling generateSample()");

    sample.clear();

    int n_nodes_per_tree = size / n_trees;
    int node, depth, node_sample, sub_tree_size;
    int offset = 0;

    for( int t=0; t<n_trees; t++ )
    {
        depth = 0;
        node = n_nodes_per_tree / 2;
        sub_tree_size = node;
        while( depth < tree_depth )
        {
            // HUGO: Note that local_node_expectation is really
            // used as a probability, even for signed samples.
            // Sorry for the misleading choice of variable name...
            node_sample = random_gen->binomial_sample(
                local_node_expectation[ node + offset ] );
            if( use_signed_samples )
                sample[node + offset] = 2*node_sample - 1;
            else
                sample[node + offset] = node_sample;

            // Descending in the tree
            sub_tree_size /= 2;
            if( node_sample > 0.5 )
                node -= sub_tree_size + 1;
            else
                node += sub_tree_size + 1;
            depth++;
        }
        offset += n_nodes_per_tree;
    }
}

/////////////////////
// generateSamples //
/////////////////////
void RBMWoodsLayer::generateSamples()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectations_are_up_to_date, "Expectations should be computed "
                "before calling generateSamples()");

    PLASSERT( samples.width() == size && samples.length() == batch_size );

    //PLERROR( "RBMWoodsLayer::generateSamples(): not implemented yet" );
    samples.clear();

    int n_nodes_per_tree = size / n_trees;
    int node, depth, node_sample, sub_tree_size;
    int offset = 0;

    for( int b=0; b<batch_size; b++ )
    {
        offset = 0;
        for( int t=0; t<n_trees; t++ )
        {
            depth = 0;
            node = n_nodes_per_tree / 2;
            sub_tree_size = node;
            while( depth < tree_depth )
            {
                // HUGO: Note that local_node_expectations is really
                // used as a probability, even for signed samples.
                // Sorry for the misleading choice of variable name...
                node_sample = random_gen->binomial_sample(
                    local_node_expectations(b, node + offset) );
                if( use_signed_samples )
                    samples(b, node + offset) = 2*node_sample - 1;
                else
                    samples(b, node + offset) = node_sample;

                // Descending in the tree
                sub_tree_size /= 2;
                if( node_sample > 0.5 )
                    node -= sub_tree_size + 1;
                else
                    node += sub_tree_size + 1;
                depth++;
            }
            offset += n_nodes_per_tree;
        }
    }
}

void RBMWoodsLayer::computeProbabilisticClustering(Vec& prob_clusters)
{
    computeExpectation();
    int offset = 0;
    int n_nodes_per_tree = size / n_trees;
    prob_clusters.resize(n_trees*(n_nodes_per_tree+1));
    for( int t=0; t<n_trees; t++ )
    {
        for( int i=0; i<n_nodes_per_tree; i = i+2 )
            prob_clusters[i+offset+t] = expectation[i+offset];
        for( int i=0; i<n_nodes_per_tree; i = i+2 )
            prob_clusters[i+1+offset+t] = off_expectation[i+offset];
        offset += n_nodes_per_tree;
    }
}
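// Illustrative sketch (hypothetical helper, not used by the class): the
// units of one tree are laid out in-order, so a tree of depth d occupies
// 2^d - 1 consecutive entries with its root at the middle index, and each
// step of the descent in generateSample() halves sub_tree_size and jumps
// to the middle of the chosen subtree.  Given a sequence of left/right
// decisions, this returns the node indices that descent would visit.
static TVec<int> illustrateTreeDescent( const TVec<bool>& go_left,
                                        int tree_depth )
{
    TVec<int> visited;
    int node = ( ipow( 2, tree_depth ) - 1 ) / 2; // root of in-order layout
    int sub_tree_size = node;                     // nodes in each subtree
    for( int depth = 0; depth < tree_depth; depth++ )
    {
        visited.append( node );
        sub_tree_size /= 2;
        // an "on" sample descends into the left subtree, "off" into the right
        node += go_left[depth] ? -( sub_tree_size + 1 ) : ( sub_tree_size + 1 );
    }
    return visited;
}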
////////////////////////
// computeExpectation //
////////////////////////
void RBMWoodsLayer::computeExpectation()
{
    if( expectation_is_up_to_date )
        return;

    int n_nodes_per_tree = size / n_trees;
    int node, depth, sub_tree_size, grand_parent;
    int offset = 0;
    bool left_of_grand_parent;
    real grand_parent_prob;

    // Get local expectations at every node

    // HUGO: Note that local_node_expectation is really
    // used as a probability, even for signed samples.
    // Sorry for the misleading choice of variable name...

    // Divide and conquer computation of local (conditional) free energies
    for( int t=0; t<n_trees; t++ )
    {
        depth = tree_depth-1;
        sub_tree_size = 0;

        // Initialize last level
        for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 )
        {
            //on_free_energy[ n + offset ] = safeexp(activation[n+offset]);
            //off_free_energy[ n + offset ] = 1;
            // Now working in log-domain
            on_free_energy[ n + offset ] = activation[n+offset];
            if( use_signed_samples )
                off_free_energy[ n + offset ] = -activation[n+offset];
            else
                off_free_energy[ n + offset ] = 0;
        }

        depth = tree_depth-2;
        sub_tree_size = 1;

        while( depth >= 0 )
        {
            for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 )
            {
                //on_free_energy[ n + offset ] = safeexp(activation[n+offset]) *
                //    ( on_free_energy[n + offset - sub_tree_size]
                //      + off_free_energy[n + offset - sub_tree_size] );
                //off_free_energy[ n + offset ] =
                //    ( on_free_energy[n + offset + sub_tree_size]
                //      + off_free_energy[n + offset + sub_tree_size] );
                // Now working in log-domain
                on_free_energy[ n + offset ] = activation[n+offset] +
                    logadd( on_free_energy[n + offset - (sub_tree_size/2+1)],
                            off_free_energy[n + offset - (sub_tree_size/2+1)] );
                if( use_signed_samples )
                    off_free_energy[ n + offset ] = -activation[n+offset] +
                        logadd( on_free_energy[n + offset + (sub_tree_size/2+1)],
                                off_free_energy[n + offset + (sub_tree_size/2+1)] );
                else
                    off_free_energy[ n + offset ] =
                        logadd( on_free_energy[n + offset + (sub_tree_size/2+1)],
                                off_free_energy[n + offset + (sub_tree_size/2+1)] );
            }
            sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1;
            depth--;
        }
        offset += n_nodes_per_tree;
    }

    for( int i=0 ; i<size ; i++ )
        //local_node_expectation[i] = on_free_energy[i]
        //    / ( on_free_energy[i] + off_free_energy[i] );
        // Now working in log-domain
        local_node_expectation[i] = safeexp(on_free_energy[i]
                        - logadd(on_free_energy[i], off_free_energy[i]));
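    // At this point, local_node_expectation[i] holds the probability that
    // node i is "on" given that it is reached (the sigmoid of the difference
    // between its on and off conditional free energies).  The downward pass
    // below turns these conditionals into unconditional marginals by
    // multiplying along the unique path that reaches each node.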
    // Compute marginal expectations
    offset = 0;
    for( int t=0; t<n_trees; t++ )
    {
        // Initialize root
        node = n_nodes_per_tree / 2;
        expectation[ node + offset ] = local_node_expectation[ node + offset ];
        off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ]);
        sub_tree_size = node;

        // First level nodes
        depth = 1;
        sub_tree_size /= 2;

        // Left child
        node = sub_tree_size;
        expectation[ node + offset ] = local_node_expectation[ node + offset ]
            * local_node_expectation[ node + offset + sub_tree_size + 1 ];
        off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ])
            * local_node_expectation[ node + offset + sub_tree_size + 1 ];

        // Right child
        node = 3*sub_tree_size + 2;
        expectation[ node + offset ] = local_node_expectation[ node + offset ]
            * (1 - local_node_expectation[ node + offset - sub_tree_size - 1 ]);
        off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ])
            * (1 - local_node_expectation[ node + offset - sub_tree_size - 1 ]);

        // Set other nodes, level-wise
        depth = 2;
        sub_tree_size /= 2;
        while( depth < tree_depth )
        {
            // Left child
            left_of_grand_parent = true;
            for( int n=sub_tree_size; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 )
            {
                if( left_of_grand_parent )
                {
                    grand_parent = n + offset + 3*sub_tree_size + 3;
                    grand_parent_prob = expectation[ grand_parent ];
                    left_of_grand_parent = false;
                }
                else
                {
                    grand_parent = n + offset - sub_tree_size - 1;
                    grand_parent_prob = off_expectation[ grand_parent ];
                    left_of_grand_parent = true;
                }

                expectation[ n + offset ] = local_node_expectation[ n + offset ]
                    * local_node_expectation[ n + offset + sub_tree_size + 1 ]
                    * grand_parent_prob;
                off_expectation[ n + offset ] = (1 - local_node_expectation[ n + offset ])
                    * local_node_expectation[ n + offset + sub_tree_size + 1 ]
                    * grand_parent_prob;
            }

            // Right child
            left_of_grand_parent = true;
            for( int n=3*sub_tree_size+2; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 )
            {
                if( left_of_grand_parent )
                {
                    grand_parent = n + offset + sub_tree_size + 1;
                    grand_parent_prob = expectation[ grand_parent ];
                    left_of_grand_parent = false;
                }
                else
                {
                    grand_parent = n + offset - 3*sub_tree_size - 3;
                    grand_parent_prob = off_expectation[ grand_parent ];
                    left_of_grand_parent = true;
                }

                expectation[ n + offset ] = local_node_expectation[ n + offset ]
                    * (1 - local_node_expectation[ n + offset - sub_tree_size - 1 ])
                    * grand_parent_prob;
                off_expectation[ n + offset ] = (1 - local_node_expectation[ n + offset ])
                    * (1 - local_node_expectation[ n + offset - sub_tree_size - 1 ])
                    * grand_parent_prob;
            }
            sub_tree_size /= 2;
            depth++;
        }
        offset += n_nodes_per_tree;
    }

    if( use_signed_samples )
        for( int i=0; i<expectation.length(); i++ )
            expectation[i] = expectation[i] - off_expectation[i];

    expectation_is_up_to_date = true;
}
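// A minimal worked instance of the marginalization above (hypothetical
// helper): for a single depth-2 tree stored in-order as
// { left leaf, root, right leaf }, the root's marginal is its local
// conditional, and each leaf is reached only when the root takes the
// corresponding branch ("on" descends left, "off" descends right):
static void illustrateDepth2Marginals( real p_left, real p_root, real p_right,
                                       Vec& on_marginals, Vec& off_marginals )
{
    on_marginals.resize( 3 );
    off_marginals.resize( 3 );
    on_marginals[1]  = p_root;                       // root
    off_marginals[1] = 1 - p_root;
    on_marginals[0]  = p_left * p_root;              // left leaf
    off_marginals[0] = ( 1 - p_left ) * p_root;
    on_marginals[2]  = p_right * ( 1 - p_root );     // right leaf
    off_marginals[2] = ( 1 - p_right ) * ( 1 - p_root );
}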
/////////////////////////
// computeExpectations //
/////////////////////////
void RBMWoodsLayer::computeExpectations()
{
    if( expectations_are_up_to_date )
        return;

    PLASSERT( expectations.width() == size
              && expectations.length() == batch_size );
    off_expectations.resize(batch_size, size);
    local_node_expectations.resize(batch_size, size);
    on_free_energies.resize(batch_size, size);
    off_free_energies.resize(batch_size, size);

    int n_nodes_per_tree = size / n_trees;
    int node, depth, sub_tree_size, grand_parent;
    int offset = 0;
    bool left_of_grand_parent;
    real grand_parent_prob;
    for( int b=0; b<batch_size; b++ )
    {
        offset = 0;
        // Get local expectations at every node

        // HUGO: Note that local_node_expectations is really
        // used as a probability, even for signed samples.
        // Sorry for the misleading choice of variable name...

        // Divide and conquer computation of local (conditional) free energies
        for( int t=0; t<n_trees; t++ )
        {
            depth = tree_depth-1;
            sub_tree_size = 0;

            // Initialize last level
            for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 )
            {
                //on_free_energies(b, n + offset) = safeexp(activations(b,n+offset));
                //off_free_energies(b, n + offset) = 1;
                // Now working in log-domain
                on_free_energies(b, n + offset) = activations(b, n+offset);
                if( use_signed_samples )
                    off_free_energies(b, n + offset) = -activations(b, n+offset);
                else
                    off_free_energies(b, n + offset) = 0;
            }

            depth = tree_depth-2;
            sub_tree_size = 1;

            while( depth >= 0 )
            {
                for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 )
                {
                    //on_free_energies(b, n + offset) = safeexp(activations(b,n+offset)) *
                    //    ( on_free_energies(b, n + offset - sub_tree_size)
                    //      + off_free_energies(b, n + offset - sub_tree_size) );
                    //off_free_energies(b, n + offset) =
                    //    ( on_free_energies(b, n + offset + sub_tree_size)
                    //      + off_free_energies(b, n + offset + sub_tree_size) );
                    // Now working in log-domain
                    on_free_energies(b, n + offset) = activations(b, n+offset) +
                        logadd( on_free_energies(b, n + offset - (sub_tree_size/2+1)),
                                off_free_energies(b, n + offset - (sub_tree_size/2+1)) );
                    if( use_signed_samples )
                        off_free_energies(b, n + offset) = -activations(b, n+offset) +
                            logadd( on_free_energies(b, n + offset + (sub_tree_size/2+1)),
                                    off_free_energies(b, n + offset + (sub_tree_size/2+1)) );
                    else
                        off_free_energies(b, n + offset) =
                            logadd( on_free_energies(b, n + offset + (sub_tree_size/2+1)),
                                    off_free_energies(b, n + offset + (sub_tree_size/2+1)) );
                }
                sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1;
                depth--;
            }
            offset += n_nodes_per_tree;
        }

        for( int i=0 ; i<size ; i++ )
            //local_node_expectations(b,i) = on_free_energies(b,i)
            //    / ( on_free_energies(b,i) + off_free_energies(b,i) );
            // Now working in log-domain
            local_node_expectations(b,i) = safeexp(on_free_energies(b,i)
                            - logadd(on_free_energies(b,i), off_free_energies(b,i)));

        // Compute marginal expectations
        offset = 0;
        for( int t=0; t<n_trees; t++ )
        {
            // Initialize root
            node = n_nodes_per_tree / 2;
            expectations(b, node + offset) = local_node_expectations(b, node + offset);
            off_expectations(b, node + offset) = (1 - local_node_expectations(b, node + offset));
            sub_tree_size = node;

            // First level nodes
            depth = 1;
            sub_tree_size /= 2;

            // Left child
            node = sub_tree_size;
            expectations(b, node + offset) = local_node_expectations(b, node + offset)
                * local_node_expectations(b, node + offset + sub_tree_size + 1);
            off_expectations(b, node + offset) = (1 - local_node_expectations(b, node + offset))
                * local_node_expectations(b, node + offset + sub_tree_size + 1);

            // Right child
            node = 3*sub_tree_size + 2;
            expectations(b, node + offset) = local_node_expectations(b, node + offset)
                * (1 - local_node_expectations(b, node + offset - sub_tree_size - 1));
            off_expectations(b, node + offset) = (1 - local_node_expectations(b, node + offset))
                * (1 - local_node_expectations(b, node + offset - sub_tree_size - 1));
            // Set other nodes, level-wise
            depth = 2;
            sub_tree_size /= 2;
            while( depth < tree_depth )
            {
                // Left child
                left_of_grand_parent = true;
                for( int n=sub_tree_size; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 )
                {
                    if( left_of_grand_parent )
                    {
                        grand_parent = n + offset + 3*sub_tree_size + 3;
                        grand_parent_prob = expectations(b, grand_parent);
                        left_of_grand_parent = false;
                    }
                    else
                    {
                        grand_parent = n + offset - sub_tree_size - 1;
                        grand_parent_prob = off_expectations(b, grand_parent);
                        left_of_grand_parent = true;
                    }

                    expectations(b, n + offset) = local_node_expectations(b, n + offset)
                        * local_node_expectations(b, n + offset + sub_tree_size + 1)
                        * grand_parent_prob;
                    off_expectations(b, n + offset) = (1 - local_node_expectations(b, n + offset))
                        * local_node_expectations(b, n + offset + sub_tree_size + 1)
                        * grand_parent_prob;
                }

                // Right child
                left_of_grand_parent = true;
                for( int n=3*sub_tree_size+2; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 )
                {
                    if( left_of_grand_parent )
                    {
                        grand_parent = n + offset + sub_tree_size + 1;
                        grand_parent_prob = expectations(b, grand_parent);
                        left_of_grand_parent = false;
                    }
                    else
                    {
                        grand_parent = n + offset - 3*sub_tree_size - 3;
                        grand_parent_prob = off_expectations(b, grand_parent);
                        left_of_grand_parent = true;
                    }

                    expectations(b, n + offset) = local_node_expectations(b, n + offset)
                        * (1 - local_node_expectations(b, n + offset - sub_tree_size - 1))
                        * grand_parent_prob;
                    off_expectations(b, n + offset) = (1 - local_node_expectations(b, n + offset))
                        * (1 - local_node_expectations(b, n + offset - sub_tree_size - 1))
                        * grand_parent_prob;
                }
                sub_tree_size /= 2;
                depth++;
            }
            offset += n_nodes_per_tree;
        }
    }

    if( use_signed_samples )
        for( int b=0; b<batch_size; b++ )
            for( int i=0; i<size; i++ )
                expectations(b,i) = expectations(b,i) - off_expectations(b,i);

    expectations_are_up_to_date = true;
}
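// A minimal sketch (hypothetical helper) of the log-domain trick used in
// computeExpectation() / computeExpectations() above: the conditional
// p = exp(a_on) / (exp(a_on) + exp(a_off)) is evaluated as
// exp(a_on - logadd(a_on, a_off)) so that large activations never overflow;
// the two forms are mathematically identical, and both equal
// sigmoid(a_on - a_off).
static real illustrateLogDomainConditional( real on_free_energy,
                                            real off_free_energy )
{
    return safeexp( on_free_energy
                    - logadd( on_free_energy, off_free_energy ) );
}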
///////////
// fprop //
///////////
void RBMWoodsLayer::fprop( const Vec& input, Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    int n_nodes_per_tree = size / n_trees;
    int node, depth, sub_tree_size, grand_parent;
    int offset = 0;
    bool left_of_grand_parent;
    real grand_parent_prob;

    // Get local expectations at every node

    // Divide and conquer computation of local (conditional) free energies
    for( int t=0; t<n_trees; t++ )
    {
        depth = tree_depth-1;
        sub_tree_size = 0;

        // Initialize last level
        for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 )
        {
            //on_free_energy[ n + offset ] = safeexp(input[n+offset] + bias[n+offset]);
            //off_free_energy[ n + offset ] = 1;
            // Now working in log-domain
            on_free_energy[ n + offset ] = input[n+offset] + bias[n+offset];
            if( use_signed_samples )
                off_free_energy[ n + offset ] = -(input[n+offset] + bias[n+offset]);
            else
                off_free_energy[ n + offset ] = 0;
        }

        depth = tree_depth-2;
        sub_tree_size = 1;

        while( depth >= 0 )
        {
            for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 )
            {
                //on_free_energy[ n + offset ] = safeexp(input[n+offset] + bias[n+offset]) *
                //    ( on_free_energy[n + offset - sub_tree_size]
                //      + off_free_energy[n + offset - sub_tree_size] );
                //off_free_energy[ n + offset ] =
                //    ( on_free_energy[n + offset + sub_tree_size]
                //      + off_free_energy[n + offset + sub_tree_size] );
                // Now working in the log-domain
                on_free_energy[ n + offset ] = input[n+offset] + bias[n+offset] +
                    logadd( on_free_energy[n + offset - (sub_tree_size/2+1)],
                            off_free_energy[n + offset - (sub_tree_size/2+1)] );
                if( use_signed_samples )
                    off_free_energy[ n + offset ] = -(input[n+offset] + bias[n+offset]) +
                        logadd( on_free_energy[n + offset + (sub_tree_size/2+1)],
                                off_free_energy[n + offset + (sub_tree_size/2+1)] );
                else
                    off_free_energy[ n + offset ] =
                        logadd( on_free_energy[n + offset + (sub_tree_size/2+1)],
                                off_free_energy[n + offset + (sub_tree_size/2+1)] );
            }
            sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1;
            depth--;
        }
        offset += n_nodes_per_tree;
    }

    for( int i=0 ; i<size ; i++ )
        //local_node_expectation[i] = on_free_energy[i]
        //    / ( on_free_energy[i] + off_free_energy[i] );
        // Now working in log-domain
        local_node_expectation[i] = safeexp(on_free_energy[i]
                        - logadd(on_free_energy[i], off_free_energy[i]));

    // Compute marginal expectations
    offset = 0;
    for( int t=0; t<n_trees; t++ )
    {
        // Initialize root
        node = n_nodes_per_tree / 2;
        output[ node + offset ] = local_node_expectation[ node + offset ];
        off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ]);
        sub_tree_size = node;

        // First level nodes
        depth = 1;
        sub_tree_size /= 2;

        // Left child
        node = sub_tree_size;
        output[ node + offset ] = local_node_expectation[ node + offset ]
            * local_node_expectation[ node + offset + sub_tree_size + 1 ];
        off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ])
            * local_node_expectation[ node + offset + sub_tree_size + 1 ];

        // Right child
        node = 3*sub_tree_size + 2;
        output[ node + offset ] = local_node_expectation[ node + offset ]
            * (1 - local_node_expectation[ node + offset - sub_tree_size - 1 ]);
        off_expectation[ node + offset ] = (1 - local_node_expectation[ node + offset ])
            * (1 - local_node_expectation[ node + offset - sub_tree_size - 1 ]);

        // Set other nodes, level-wise
        depth = 2;
        sub_tree_size /= 2;
        while( depth < tree_depth )
        {
            // Left child
            left_of_grand_parent = true;
            for( int n=sub_tree_size; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 )
            {
                if( left_of_grand_parent )
                {
                    grand_parent = n + offset + 3*sub_tree_size + 3;
                    grand_parent_prob = output[ grand_parent ];
                    left_of_grand_parent = false;
                }
                else
                {
                    grand_parent = n + offset - sub_tree_size - 1;
                    grand_parent_prob = off_expectation[ grand_parent ];
                    left_of_grand_parent = true;
                }

                output[ n + offset ] = local_node_expectation[ n + offset ]
                    * local_node_expectation[ n + offset + sub_tree_size + 1 ]
                    * grand_parent_prob;
                off_expectation[ n + offset ] = (1 - local_node_expectation[ n + offset ])
                    * local_node_expectation[ n + offset + sub_tree_size + 1 ]
                    * grand_parent_prob;
            }
            // Right child
            left_of_grand_parent = true;
            for( int n=3*sub_tree_size+2; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 )
            {
                if( left_of_grand_parent )
                {
                    grand_parent = n + offset + sub_tree_size + 1;
                    grand_parent_prob = output[ grand_parent ];
                    left_of_grand_parent = false;
                }
                else
                {
                    grand_parent = n + offset - 3*sub_tree_size - 3;
                    grand_parent_prob = off_expectation[ grand_parent ];
                    left_of_grand_parent = true;
                }

                output[ n + offset ] = local_node_expectation[ n + offset ]
                    * (1 - local_node_expectation[ n + offset - sub_tree_size - 1 ])
                    * grand_parent_prob;
                off_expectation[ n + offset ] = (1 - local_node_expectation[ n + offset ])
                    * (1 - local_node_expectation[ n + offset - sub_tree_size - 1 ])
                    * grand_parent_prob;
            }
            sub_tree_size /= 2;
            depth++;
        }
        offset += n_nodes_per_tree;
    }

    if( use_signed_samples )
        for( int i=0; i<output.length(); i++ )
            output[i] = output[i] - off_expectation[i];
}

void RBMWoodsLayer::fprop( const Mat& inputs, Mat& outputs )
{
    int mbatch_size = inputs.length();
    PLASSERT( inputs.width() == size );
    outputs.resize( mbatch_size, size );

    PLERROR( "RBMWoodsLayer::fprop(): not implemented yet" );
}

void RBMWoodsLayer::fprop( const Vec& input, const Vec& rbm_bias,
                           Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    PLASSERT( rbm_bias.size() == input_size );
    output.resize( output_size );

    PLERROR( "RBMWoodsLayer::fprop(): not implemented yet" );
}
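// Hypothetical usage sketch for the single-vector fprop above (the setup is
// illustrative, not part of the PLearn test suite): the layer maps an
// activation vector of size n_trees * (2^tree_depth - 1) to the marginal
// "on" probability of every node, shifted to [-1,1] when use_signed_samples
// is true.
static void illustrateFprop()
{
    PP<RBMWoodsLayer> layer = new RBMWoodsLayer( 0.01 ); // learning rate
    layer->n_trees = 2;
    layer->tree_depth = 3;
    layer->build();                  // sets size = 2 * (2^3 - 1) = 14
    Vec input( layer->size );
    input.clear();                   // zero activations from the connection
    Vec output;
    layer->fprop( input, output );   // one marginal expectation per unit
}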
/////////////////
// bpropUpdate //
/////////////////
void RBMWoodsLayer::bpropUpdate(const Vec& input, const Vec& output,
                                Vec& input_gradient,
                                const Vec& output_gradient,
                                bool accumulate)
{
    PLASSERT( input.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( size );
        input_gradient.clear();
    }

    // Compute gradient on marginal expectations
    int n_nodes_per_tree = size / n_trees;
    int node, depth, sub_tree_size, grand_parent;
    int offset = 0;
    bool left_of_grand_parent;
    real grand_parent_prob;
    real node_exp, parent_exp, out_grad, off_grad;
    local_node_expectation_gradient.clear();
    on_tree_gradient.clear();
    off_tree_gradient.clear();

    for( int t=0; t<n_trees; t++ )
    {
        // Set other nodes, level-wise
        depth = tree_depth-1;
        sub_tree_size = 0;
        while( depth > 1 )
        {
            // Left child
            left_of_grand_parent = true;
            for( int n=sub_tree_size; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 )
            {
                out_grad = output_gradient[ n + offset ] +
                    on_tree_gradient[ n + offset ];
                off_grad = off_tree_gradient[ n + offset ];
                node_exp = local_node_expectation[ n + offset ];
                parent_exp = local_node_expectation[ n + offset + sub_tree_size + 1 ];

                if( left_of_grand_parent )
                {
                    grand_parent = n + offset + 3*sub_tree_size + 3;
                    if( use_signed_samples )
                        grand_parent_prob = output[ grand_parent ]
                            + off_expectation[ grand_parent ];
                    else
                        grand_parent_prob = output[ grand_parent ];
                    // Gradient for rest of the tree
                    on_tree_gradient[ grand_parent ] +=
                        ( out_grad * node_exp
                          + off_grad * (1 - node_exp) )
                        * parent_exp;
                    left_of_grand_parent = false;
                }
                else
                {
                    grand_parent = n + offset - sub_tree_size - 1;
                    grand_parent_prob = off_expectation[ grand_parent ];
                    // Gradient for rest of the tree
                    off_tree_gradient[ grand_parent ] +=
                        ( out_grad * node_exp
                          + off_grad * (1 - node_exp) )
                        * parent_exp;
                    left_of_grand_parent = true;
                }

                // Gradient w.r.t. current node
                local_node_expectation_gradient[ n + offset ] +=
                    ( out_grad - off_grad ) * parent_exp * grand_parent_prob;
                    //* node_exp * ( 1 - node_exp );

                // Gradient w.r.t. parent node
                local_node_expectation_gradient[ n + offset + sub_tree_size + 1 ] +=
                    ( out_grad * node_exp + off_grad * (1 - node_exp) ) * grand_parent_prob;
                    //* parent_exp * (1-parent_exp);
            }

            // Right child
            left_of_grand_parent = true;
            for( int n=3*sub_tree_size+2; n<n_nodes_per_tree; n += 4*sub_tree_size + 4 )
            {
                out_grad = output_gradient[ n + offset ] +
                    on_tree_gradient[ n + offset ];
                off_grad = off_tree_gradient[ n + offset ];
                node_exp = local_node_expectation[ n + offset ];
                parent_exp = local_node_expectation[ n + offset - sub_tree_size - 1 ];

                if( left_of_grand_parent )
                {
                    grand_parent = n + offset + sub_tree_size + 1;
                    if( use_signed_samples )
                        grand_parent_prob = output[ grand_parent ]
                            + off_expectation[ grand_parent ];
                    else
                        grand_parent_prob = output[ grand_parent ];
                    // Gradient for rest of the tree
                    on_tree_gradient[ grand_parent ] +=
                        ( out_grad * node_exp
                          + off_grad * (1 - node_exp) )
                        * ( 1 - parent_exp );
                    left_of_grand_parent = false;
                }
                else
                {
                    grand_parent = n + offset - 3*sub_tree_size - 3;
                    grand_parent_prob = off_expectation[ grand_parent ];
                    // Gradient for rest of the tree
                    off_tree_gradient[ grand_parent ] +=
                        ( out_grad * node_exp
                          + off_grad * (1 - node_exp) )
                        * ( 1 - parent_exp );
                    left_of_grand_parent = true;
                }

                // Gradient w.r.t. current node
                local_node_expectation_gradient[ n + offset ] +=
                    ( out_grad - off_grad ) * ( 1 - parent_exp ) * grand_parent_prob;
                    //* node_exp * ( 1 - node_exp );

                // Gradient w.r.t. parent node
                local_node_expectation_gradient[ n + offset - sub_tree_size - 1 ] -=
                    ( out_grad * node_exp + off_grad * (1 - node_exp) ) * grand_parent_prob;
                    //* parent_exp * (1-parent_exp);
            }
            sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1;
            depth--;
        }

        // First level nodes
        depth = 1;

        // Left child
        node = sub_tree_size;
        out_grad = output_gradient[ node + offset ] +
            on_tree_gradient[ node + offset ];
        off_grad = off_tree_gradient[ node + offset ];
        node_exp = local_node_expectation[ node + offset ];
        parent_exp = local_node_expectation[ node + offset + sub_tree_size + 1 ];

        // Gradient w.r.t. current node
        local_node_expectation_gradient[ node + offset ] +=
            ( out_grad - off_grad ) * parent_exp;
            //* node_exp * ( 1 - node_exp );

        // Gradient w.r.t. parent node
        local_node_expectation_gradient[ node + offset + sub_tree_size + 1 ] +=
            ( out_grad * node_exp + off_grad * (1 - node_exp) );
            //* parent_exp * (1-parent_exp);
        // Right child
        node = 3*sub_tree_size + 2;
        out_grad = output_gradient[ node + offset ] +
            on_tree_gradient[ node + offset ];
        off_grad = off_tree_gradient[ node + offset ];
        node_exp = local_node_expectation[ node + offset ];
        parent_exp = local_node_expectation[ node + offset - sub_tree_size - 1 ];

        // Gradient w.r.t. current node
        local_node_expectation_gradient[ node + offset ] +=
            ( out_grad - off_grad ) * ( 1 - parent_exp );
            //* node_exp * ( 1 - node_exp );

        // Gradient w.r.t. parent node
        local_node_expectation_gradient[ node + offset - sub_tree_size - 1 ] -=
            ( out_grad * node_exp + off_grad * (1 - node_exp) );
            //* parent_exp * (1-parent_exp);

        // Root
        node = n_nodes_per_tree / 2;
        sub_tree_size = 2 * ( sub_tree_size + 1 ) - 1;

        out_grad = output_gradient[ node + offset ] +
            on_tree_gradient[ node + offset ];
        off_grad = off_tree_gradient[ node + offset ];
        node_exp = local_node_expectation[ node + offset ];
        local_node_expectation_gradient[ node + offset ] +=
            ( out_grad - off_grad ); // * node_exp * ( 1 - node_exp );

        offset += n_nodes_per_tree;
    }

    for( int i=0 ; i<size ; i++ )
    {
        node_exp = local_node_expectation[i];
        out_grad = local_node_expectation_gradient[i];
        on_free_energy_gradient[i] = out_grad * node_exp * ( 1 - node_exp );
        off_free_energy_gradient[i] = -out_grad * node_exp * ( 1 - node_exp );
    }

    offset = 0;
    for( int t=0; t<n_trees; t++ )
    {
        depth = 0;
        sub_tree_size = n_nodes_per_tree / 2;

        while( depth < tree_depth-1 )
        {
            for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 )
            {
                out_grad = on_free_energy_gradient[ n + offset ];
                node_exp = local_node_expectation[n + offset - (sub_tree_size/2+1)];
                input_gradient[n+offset] += out_grad;
                on_free_energy_gradient[n + offset - (sub_tree_size/2+1)] +=
                    out_grad * node_exp;
                off_free_energy_gradient[n + offset - (sub_tree_size/2+1)] +=
                    out_grad * (1 - node_exp);

                out_grad = off_free_energy_gradient[ n + offset ];
                node_exp = local_node_expectation[n + offset + (sub_tree_size/2+1)];
                if( use_signed_samples )
                    input_gradient[n+offset] -= out_grad;
                on_free_energy_gradient[n + offset + (sub_tree_size/2+1)] +=
                    out_grad * node_exp;
                off_free_energy_gradient[n + offset + (sub_tree_size/2+1)] +=
                    out_grad * (1 - node_exp);
            }
            sub_tree_size /= 2;
            depth++;
        }

        depth = tree_depth-1;
        sub_tree_size = 0;

        for( int n=sub_tree_size; n<n_nodes_per_tree; n += 2*sub_tree_size + 2 )
        {
            input_gradient[n+offset] += on_free_energy_gradient[ n + offset ];
            if( use_signed_samples )
                input_gradient[n+offset] -= off_free_energy_gradient[ n + offset ];
        }

        offset += n_nodes_per_tree;
    }
    if( momentum != 0. )
        bias_inc.resize( size );

    for( int i=0 ; i<size ; i++ )
    {
        real in_grad_i = input_gradient[i];

        if( momentum == 0. )
        {
            // update the bias: bias -= learning_rate * input_gradient
            bias[i] -= learning_rate * in_grad_i;
        }
        else
        {
            // The update rule becomes:
            // bias_inc = momentum * bias_inc - learning_rate * input_gradient
            // bias += bias_inc
            bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
            bias[i] += bias_inc[i];
        }
    }

    applyBiasDecay();
}
void RBMWoodsLayer::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                Mat& input_gradients,
                                const Mat& output_gradients,
                                bool accumulate)
{
    PLASSERT( inputs.width() == size );
    PLASSERT( outputs.width() == size );
    PLASSERT( output_gradients.width() == size );

    int mbatch_size = inputs.length();
    PLASSERT( outputs.length() == mbatch_size );
    PLASSERT( output_gradients.length() == mbatch_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == size &&
                      input_gradients.length() == mbatch_size,
                      "Cannot resize input_gradients and accumulate into it" );
    }
    else
    {
        input_gradients.resize(mbatch_size, size);
        input_gradients.clear();
    }

    PLERROR( "RBMWoodsLayer::bpropUpdate(): not implemented yet" );

    if( momentum != 0. )
        bias_inc.resize( size );

    // TODO Can we do this more efficiently? (using BLAS)

    // We use the average gradient over the mini-batch.
    real avg_lr = learning_rate / inputs.length();

    for (int j = 0; j < mbatch_size; j++)
    {
        for( int i=0 ; i<size ; i++ )
        {
            real output_i = outputs(j, i);
            real in_grad_i = output_i * (1-output_i) * output_gradients(j, i);
            input_gradients(j, i) += in_grad_i;

            if( momentum == 0. )
            {
                // update the bias: bias -= learning_rate * input_gradient
                bias[i] -= avg_lr * in_grad_i;
            }
            else
            {
                PLERROR("In RBMWoodsLayer:bpropUpdate - Not implemented for "
                        "momentum with mini-batches");
                // The update rule becomes:
                // bias_inc = momentum * bias_inc - learning_rate * input_gradient
                // bias += bias_inc
                bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
                bias[i] += bias_inc[i];
            }
        }
    }

    applyBiasDecay();
}


void RBMWoodsLayer::bpropUpdate(const Vec& input, const Vec& rbm_bias,
                                const Vec& output,
                                Vec& input_gradient, Vec& rbm_bias_gradient,
                                const Vec& output_gradient)
{
    PLASSERT( input.size() == size );
    PLASSERT( rbm_bias.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );
    input_gradient.resize( size );
    rbm_bias_gradient.resize( size );

    PLERROR( "RBMWoodsLayer::bpropUpdate(): not implemented yet" );

    for( int i=0 ; i<size ; i++ )
    {
        real output_i = output[i];
        input_gradient[i] = output_i * (1-output_i) * output_gradient[i];
    }

    rbm_bias_gradient << input_gradient;
}

real RBMWoodsLayer::fpropNLL(const Vec& target)
{
    PLASSERT( target.size() == input_size );

    PLERROR( "RBMWoodsLayer::fpropNLL(): not implemented yet" );

    real ret = 0;
    real target_i, activation_i;
    if(use_fast_approximations){
        for( int i=0 ; i<size ; i++ )
        {
            target_i = target[i];
            activation_i = activation[i];
            ret += tabulated_softplus(activation_i) - target_i * activation_i;
            // nll = -target*log(sigmoid(act)) - (1-target)*log(1-sigmoid(act))
            // but it is numerically unstable, so use instead the following
            // identity:
            //     = target*softplus(-act) + (1-target)*(act+softplus(-act))
            //     = act + softplus(-act) - target*act
            //     = softplus(act) - target*act
        }
    } else {
        for( int i=0 ; i<size ; i++ )
        {
            target_i = target[i];
            activation_i = activation[i];
            ret += softplus(activation_i) - target_i * activation_i;
        }
    }
    return ret;
}
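// The identity used in fpropNLL() above, written out (hypothetical helper):
// for one unit with activation a and binary target t,
//   -t*log(sigmoid(a)) - (1-t)*log(1-sigmoid(a)) = softplus(a) - t*a
// because log(sigmoid(a)) = -softplus(-a) and softplus(a) = a + softplus(-a).
static real illustrateUnitNLL( real activation, real target )
{
    return softplus( activation ) - target * activation;
}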
void RBMWoodsLayer::fpropNLL(const Mat& targets, const Mat& costs_column)
{
    // computeExpectations(); // why?

    PLERROR( "RBMWoodsLayer::fpropNLL(): not implemented yet" );

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );

    for (int k=0; k<batch_size; k++) // loop over minibatch
    {
        real nll = 0;
        real* activation = activations[k];
        real* target = targets[k];
        if(use_fast_approximations){
            for( int i=0 ; i<size ; i++ ) // loop over outputs
            {
                if(!fast_exact_is_equal(target[i],0.0))
                    // nll -= target[i] * pl_log(expectations[i]);
                    // but it is numerically unstable, so use instead
                    // log (1/(1+exp(-x))) = -log(1+exp(-x)) = -softplus(-x)
                    nll += target[i] * tabulated_softplus(-activation[i]);
                if(!fast_exact_is_equal(target[i],1.0))
                    // nll -= (1-target[i]) * pl_log(1-output[i]);
                    // log (1 - 1/(1+exp(-x))) = log(exp(-x)/(1+exp(-x)))
                    //                         = log(1/(1+exp(x)))
                    //                         = -log(1+exp(x))
                    //                         = -softplus(x)
                    nll += (1-target[i]) * tabulated_softplus(activation[i]);
            }
        } else {
            for( int i=0 ; i<size ; i++ ) // loop over outputs
            {
                if(!fast_exact_is_equal(target[i],0.0))
                    // nll -= target[i] * pl_log(expectations[i]);
                    // but it is numerically unstable, so use instead
                    // log (1/(1+exp(-x))) = -log(1+exp(-x)) = -softplus(-x)
                    nll += target[i] * softplus(-activation[i]);
                if(!fast_exact_is_equal(target[i],1.0))
                    // nll -= (1-target[i]) * pl_log(1-output[i]);
                    // log (1 - 1/(1+exp(-x))) = log(exp(-x)/(1+exp(-x)))
                    //                         = log(1/(1+exp(x)))
                    //                         = -log(1+exp(x))
                    //                         = -softplus(x)
                    nll += (1-target[i]) * softplus(activation[i]);
            }
        }
        costs_column(k,0) = nll;
    }
}

void RBMWoodsLayer::bpropNLL(const Vec& target, real nll, Vec& bias_gradient)
{
    PLERROR( "RBMWoodsLayer::bpropNLL(): not implemented yet" );
    computeExpectation();

    PLASSERT( target.size() == input_size );
    bias_gradient.resize( size );

    // bias_gradient = expectation - target
    substract(expectation, target, bias_gradient);
}

void RBMWoodsLayer::bpropNLL(const Mat& targets, const Mat& costs_column,
                             Mat& bias_gradients)
{
    PLERROR( "RBMWoodsLayer::bpropNLL(): not implemented yet" );
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );
    bias_gradients.resize( batch_size, size );

    // bias_gradients = expectations - targets
    substract(expectations, targets, bias_gradients);
}
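// Why the (stub) bpropNLL() code above uses "expectation - target"
// (hypothetical helper): for a sigmoid unit,
//   d/da [ softplus(a) - t*a ] = sigmoid(a) - t,
// i.e. the unit's expectation minus its target, which is what substract()
// computes element-wise over the whole layer.
static real illustrateUnitNLLGradient( real activation, real target )
{
    return sigmoid( activation ) - target;
}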
void RBMWoodsLayer::declareOptions(OptionList& ol)
{
    declareOption(ol, "n_trees", &RBMWoodsLayer::n_trees,
                  OptionBase::buildoption,
                  "Number of trees in the woods.");

    declareOption(ol, "tree_depth", &RBMWoodsLayer::tree_depth,
                  OptionBase::buildoption,
                  "Depth of the trees in the woods (1 gives the ordinary "
                  "RBMBinomialLayer).");

    declareOption(ol, "use_signed_samples", &RBMWoodsLayer::use_signed_samples,
                  OptionBase::buildoption,
                  "Indication that samples should be in {-1,1}, not {0,1}, at nodes where a\n"
                  "left/right decision is made. Other nodes are set to 0.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMWoodsLayer::build_()
{
    PLASSERT( n_trees > 0 );
    PLASSERT( tree_depth > 0 );

    if( tree_depth < 2 )
        PLERROR("RBMWoodsLayer::build_(): tree_depth < 2 not supported, use "
                "RBMBinomialLayer instead.");

    size = n_trees * ( ipow( 2, tree_depth ) - 1 );
    local_node_expectation.resize( size );
    on_free_energy.resize( size );
    off_free_energy.resize( size );
    off_expectation.resize( size );
    local_node_expectation_gradient.resize( size );
    on_tree_gradient.resize( size );
    off_tree_gradient.resize( size );
    on_free_energy_gradient.resize( size );
    off_free_energy_gradient.resize( size );

    // Must call parent's build, since size was just set
    inherited::build();
}

void RBMWoodsLayer::build()
{
    inherited::build();
    build_();
}


void RBMWoodsLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField( off_expectation, copies );
    deepCopyField( off_expectations, copies );
    deepCopyField( local_node_expectation, copies );
    deepCopyField( local_node_expectations, copies );
    deepCopyField( on_free_energy, copies );
    deepCopyField( on_free_energies, copies );
    deepCopyField( off_free_energy, copies );
    deepCopyField( off_free_energies, copies );
    deepCopyField( local_node_expectation_gradient, copies );
    deepCopyField( on_tree_gradient, copies );
    deepCopyField( off_tree_gradient, copies );
    deepCopyField( on_free_energy_gradient, copies );
    deepCopyField( off_free_energy_gradient, copies );
}

real RBMWoodsLayer::energy(const Vec& unit_values) const
{
    PLERROR( "RBMWoodsLayer::energy(): not implemented yet" );
    return -dot(unit_values, bias);
}
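// freeEnergyContribution() below accumulates one term per tree:
//   F_tree = -log( sum_c exp( -E(c) ) )
// where c ranges over the 2^tree_depth configurations of the tree (one per
// leaf, with that leaf on or off) and E(c) is minus the sum of the
// activations collected along the chosen path.  A worked instance for the
// degenerate depth-1 case with a single unsigned unit (hypothetical helper):
// the only configurations are "on" and "off", giving
//   F = -log( exp(a) + 1 ) = -logadd(a, 0) = -softplus(a).
static real illustrateDepth1FreeEnergy( real activation )
{
    return -logadd( activation, 0. );
}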
real RBMWoodsLayer::freeEnergyContribution(const Vec& unit_activations)
    const
{
    PLASSERT( unit_activations.size() == size );
    int n_nodes_per_tree = size / n_trees;
    tree_free_energies.resize(n_trees);
    tree_energies.resize(n_trees * (n_nodes_per_tree+1));

    int offset = 0;
    int sub_tree_size = n_nodes_per_tree / 2;
    int sub_root = sub_tree_size;
    real result = 0;
    real tree_energy = 0;
    real tree_free_energy = 0;
    real leaf_activation = 0;
    for( int t = 0; t<n_trees; t++ )
    {
        for( int n = 0; n < n_nodes_per_tree; n = n+2 ) // Looking only at leaves
        {
            // Compute the energy of the tree along this leaf's path
            tree_energy = 0;
            sub_tree_size = n_nodes_per_tree / 2;
            sub_root = sub_tree_size;
            for( int d=0; d<tree_depth-1; d++ )
            {
                if( n < sub_root )
                {
                    tree_energy -= unit_activations[offset+sub_root];
                    sub_tree_size /= 2;
                    sub_root -= sub_tree_size + 1;
                }
                else
                {
                    if( use_signed_samples )
                        tree_energy -= -unit_activations[offset+sub_root];
                    sub_tree_size /= 2;
                    sub_root += sub_tree_size + 1;
                }
            }

            leaf_activation = unit_activations[offset+n];
            // Add free energy of tree with activated leaf
            if( n == 0 )
                tree_free_energy = tree_energy - leaf_activation;
            else
                tree_free_energy = -logadd( -tree_energy + leaf_activation,
                                            -tree_free_energy );
            tree_energies[offset+t+n] = tree_energy - leaf_activation;

            // Add free energy of tree with inactivated leaf
            if( use_signed_samples )
            {
                tree_free_energy = -logadd( -tree_energy - leaf_activation,
                                            -tree_free_energy );
                tree_energies[offset+t+n+1] = tree_energy + leaf_activation;
            }
            else
            {
                tree_free_energy = -logadd( -tree_energy, -tree_free_energy );
                tree_energies[offset+t+n+1] = tree_energy;
            }
        }
        tree_free_energies[t] = tree_free_energy;
        result += tree_free_energy;
        offset += n_nodes_per_tree;
    }
    return result;
}
void RBMWoodsLayer::freeEnergyContributionGradient(
    const Vec& unit_activations,
    Vec& unit_activations_gradient,
    real output_gradient, bool accumulate) const
{
    PLASSERT( unit_activations.size() == size );
    unit_activations_gradient.resize( size );
    if( !accumulate ) unit_activations_gradient.clear();

    // This method assumes freeEnergyContribution() has been called before,
    // with the same unit_activations vector!!!

    int n_nodes_per_tree = size / n_trees;
    int offset = 0;
    int sub_tree_size = n_nodes_per_tree / 2;
    int sub_root = sub_tree_size;
    real tree_energy = 0;
    real tree_energy_gradient = 0;
    real tree_energy_leaf_on_gradient = 0;
    real tree_energy_leaf_off_gradient = 0;

    // Fills in the internal variables tree_energies and tree_free_energies.
    // I have to do this because I can't assume the last time
    // freeEnergyContribution() was called was with the same unit_activations...
    freeEnergyContribution(unit_activations);

    unit_activations_neg_gradient.resize(size);
    unit_activations_neg_gradient_init.resize(size);
    unit_activations_neg_gradient_init.fill(false);
    if( use_signed_samples )
    {
        unit_activations_pos_gradient.resize(size);
        unit_activations_pos_gradient_init.resize(size);
        unit_activations_pos_gradient_init.fill(false);
    }

    for( int t = 0; t<n_trees; t++ )
    {
        for( int n = 0; n < n_nodes_per_tree; n = n+2 ) // Looking only at leaves
        {
            // Compute the energy of the tree along this leaf's path
            tree_energy = 0;
            sub_tree_size = n_nodes_per_tree / 2;
            sub_root = sub_tree_size;
            // First do things on log-scale
            tree_energy_leaf_on_gradient =
                -tree_energies[offset+t+n] + tree_free_energies[t];
            tree_energy_leaf_off_gradient =
                -tree_energies[offset+t+n+1] + tree_free_energies[t];
            tree_energy_gradient = logadd(tree_energy_leaf_on_gradient,
                                          tree_energy_leaf_off_gradient);
            for( int d=0; d<tree_depth-1; d++ )
            {
                if( n < sub_root )
                {
                    if( unit_activations_neg_gradient_init[offset+sub_root] )
                        unit_activations_neg_gradient[offset+sub_root] =
                            logadd(tree_energy_gradient,
                                   unit_activations_neg_gradient[offset+sub_root]);
                    else
                    {
                        unit_activations_neg_gradient[offset+sub_root] =
                            tree_energy_gradient;
                        unit_activations_neg_gradient_init[offset+sub_root] = true;
                    }

                    sub_tree_size /= 2;
                    sub_root -= sub_tree_size + 1;
                }
                else
                {
                    if( use_signed_samples )
                    {
                        if( unit_activations_pos_gradient_init[offset+sub_root] )
                            unit_activations_pos_gradient[offset+sub_root] =
                                logadd(tree_energy_gradient,
                                       unit_activations_pos_gradient[offset+sub_root]);
                        else
                        {
                            unit_activations_pos_gradient[offset+sub_root] =
                                tree_energy_gradient;
                            unit_activations_pos_gradient_init[offset+sub_root] = true;
                        }
                    }
                    sub_tree_size /= 2;
                    sub_root += sub_tree_size + 1;
                }
            }

            unit_activations_neg_gradient[offset+n] =
                tree_energy_leaf_on_gradient;
            unit_activations_neg_gradient_init[offset+n] = true;

            if( use_signed_samples )
            {
                unit_activations_pos_gradient[offset+n] =
                    tree_energy_leaf_off_gradient;
                unit_activations_pos_gradient_init[offset+n] = true;
            }
        }
        offset += n_nodes_per_tree;
    }

    // Go back to linear-scale
    for(int i=0; i<size; i++)
        unit_activations_gradient[i] -= output_gradient *
            safeexp( unit_activations_neg_gradient[i] );

    if( use_signed_samples )
        for(int i=0; i<size; i++)
            unit_activations_gradient[i] += output_gradient *
                safeexp( unit_activations_pos_gradient[i] );
}

int RBMWoodsLayer::getConfigurationCount()
{
    real ret = ipow(ipow(2.0, tree_depth), n_trees);
    if( ret > INT_MAX )
        return INFINITE_CONFIGURATIONS;
    else
        return (int) round(ret);
}

void RBMWoodsLayer::getConfiguration(int conf_index, Vec& output)
{
    PLASSERT( output.length() == size );
    PLASSERT( conf_index >= 0 && conf_index < getConfigurationCount() );

    int n_conf_per_tree = ipow(2, tree_depth);
    int conf_i = conf_index;
    int begin = 0;
    int current_node, sub_tree_size, tree_conf_i;
    output.clear();
    Vec output_i;
    for( int i = 0; i < n_trees; ++i ) {
        output_i = output.subVec( begin, n_conf_per_tree-1 );
        tree_conf_i = conf_i % n_conf_per_tree;
        // Get current tree's configuration
        output_i.clear();
        current_node = (n_conf_per_tree-1)/2;
        sub_tree_size = current_node;
        for( int j=0; j < tree_depth; j++ )
        {
            if( tree_conf_i < current_node + 1 )
            {
                output_i[current_node] = 1;
                sub_tree_size /= 2;
                current_node -= sub_tree_size + 1;
            }
            else
            {
                if( use_signed_samples )
                    output_i[current_node] = -1;
                sub_tree_size /= 2;
                current_node += sub_tree_size + 1;
            }
        }
        conf_i /= n_conf_per_tree;
        begin += n_conf_per_tree-1;
    }
}

} // end of namespace PLearn


/*
Local Variables:
mode:c++
c-basic-offset:4
c-file-style:"stroustrup"
c-file-offsets:((innamespace . 0)(inline-open . 0))
indent-tabs-mode:nil
fill-column:79
End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :