// -*- C++ -*-

// RBMLocalMultinomialLayer.cc
//
// Copyright (C) 2007 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Author: Pascal Lamblin

#include "RBMLocalMultinomialLayer.h"
#include <plearn/math/TMat_maths.h>
#include "RBMConnection.h"

namespace PLearn {
using namespace std;

// Helper functions, like the ones using Vecs, but with Mats
template <class T>
void softmax(const TMat<T>& x, const TMat<T>& y)
{
    int l = x.length();
    int w = x.width();
    PLASSERT( y.length() == l );
    PLASSERT( y.width() == w );

    if (l*w > 0)
    {
        // Iterate with the template type T (the original hard-coded
        // TMatElementIterator<real>, which broke genericity).
        TMatElementIterator<T> xp = x.begin();
        TMatElementIterator<T> yp = y.begin();
        T maxx = max(x);
        T s = 0;

        for (int i=0; i<l*w; i++, xp++, yp++)
            s += ( (*yp) = safeexp((*xp) - maxx) );

        if (s == 0)
            PLERROR( "Trying to divide by 0 in softmax");
        s = 1.0 / s;

        for (yp = y.begin(); yp != y.end(); yp++)
            (*yp) *= s;
    }
}
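
// Illustrative example (added commentary, not from the original source):
// for the 1x3 matrix x = [1 2 3], subtracting maxx = 3 gives [-2 -1 0];
// the exponentials [0.135 0.368 1.0] then normalize to approximately
// [0.09 0.245 0.665]. The shift changes nothing mathematically, since
// softmax(x) equals softmax(x - c) for any constant c, but it prevents
// overflow in safeexp.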

template <class T>
T logadd(const TMat<T>& mat)
{
    if (mat.isEmpty())
        return LOG_INIT;

    // Iterate with the template type T (was TMatElementIterator<real>).
    TMatElementIterator<T> p_mat = mat.begin();
    T sum = *p_mat++;

    for (int i=1; i<mat.size(); i++, p_mat++)
        sum = logadd(sum, *p_mat);

    return sum;
}

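// Illustrative note: the scalar logadd(a, b) computes log(exp(a) + exp(b))
// in a numerically stable way, so folding it over the matrix yields
// log(sum_ij exp(mat_ij)). For instance, logadd of a 2x2 matrix of zeros
// is log(4).
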
int multinomial_sample(const PP<PRandom>& rg, const Mat& distribution)
{
    real u = rg->uniform_sample();
    TMatElementIterator<real> pi = distribution.begin();
    real s = *pi;
#ifdef BOUNDCHECK
    int w = distribution.width();
#endif
    int n = distribution.size();
    int i = 0;

    // Stop at the last element: the original bound (i < n) advanced "pi"
    // one element past the end of the matrix when u exceeded the
    // accumulated probability mass, so the "improbable" i == n fallback
    // is no longer needed.
    while (s < u && i < n - 1)
    {
        PLASSERT( *pi == distribution(i / w, i % w) );
        i++;
        pi++;
        s += *pi;
    }
    return i;
}
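
// Illustrative note: this is inverse-CDF sampling over the flattened
// matrix: draw u ~ U(0,1) and return the first index whose cumulative
// probability reaches u. With distribution (0.2, 0.5, 0.3) and u = 0.6,
// the running sum passes 0.2, then 0.7 >= u, so index 1 is returned.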

template<class T>
void fill_one_hot(const TMat<T>& mat, int hotpos, T coldvalue, T hotvalue)
{
    PLASSERT_MSG( mat.isNotEmpty(), "Given mat must not be empty" );
    PLASSERT_MSG( hotpos >= 0, "hotpos out of mat range" );
    PLASSERT_MSG( mat.size() > 1 || hotpos <= 1, "hotpos out of mat range" );
    PLASSERT_MSG( hotpos < mat.size() || mat.size() == 1,
                  "hotpos out of mat range" );

    if (mat.size() == 1)
        mat(0,0) = (hotpos == 0 ? coldvalue : hotvalue);
    else
    {
        mat.fill(coldvalue);
        int w = mat.width();
        // The original computed this element but dropped the assignment,
        // leaving the "hot" position at coldvalue.
        mat(hotpos / w, hotpos % w) = hotvalue;
    }
}
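
// Illustrative example: fill_one_hot on a 2x2 matrix with hotpos = 3,
// coldvalue = 0 and hotvalue = 1 yields [0 0; 0 1].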


PLEARN_IMPLEMENT_OBJECT(
    RBMLocalMultinomialLayer,
    "Layer in an RBM, consisting of multiple local multinomial units, one"
    " per area of each image",
    "");

RBMLocalMultinomialLayer::RBMLocalMultinomialLayer( real the_learning_rate ) :
    inherited( the_learning_rate )
{
}

void RBMLocalMultinomialLayer::generateSample()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectation_is_up_to_date, "Expectation should be computed "
            "before calling generateSample()");

    for (int l=0; l<n_images; l++)
    {
        Mat expectation_image = expectation
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat sample_image = sample
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);

        for (int i=0; i<images_length; i+=area_length)
            for (int j=0; j<images_width; j+=area_width)
            {
                Mat expectation_area =
                    expectation_image.subMat(i, j, area_length, area_width);
                Mat sample_area =
                    sample_image.subMat(i, j, area_length, area_width);
                int index = multinomial_sample(random_gen, expectation_area);
                fill_one_hot(sample_area, index, real(0), real(1));
            }
    }
}
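
// Illustrative note: after generateSample(), every (area_length x
// area_width) area of "sample" holds exactly one 1, at the position drawn
// from that area's multinomial, so the full sample contains n_areas ones.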

void RBMLocalMultinomialLayer::generateSamples()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectations_are_up_to_date, "Expectations should be computed "
                        "before calling generateSamples()");

    PLASSERT( samples.width() == size && samples.length() == batch_size );

    for (int k = 0; k < batch_size; k++)
        for (int l=0; l<n_images; l++)
        {
            Mat expectation_image = expectations(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);
            Mat sample_image = samples(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);

            for (int i=0; i<images_length; i+=area_length)
                for (int j=0; j<images_width; j+=area_width)
                {
                    Mat expectation_area =
                        expectation_image.subMat(i, j, area_length, area_width);
                    Mat sample_area =
                        sample_image.subMat(i, j, area_length, area_width);
                    int index = multinomial_sample(random_gen,
                                                   expectation_area);
                    fill_one_hot(sample_area, index, real(0), real(1));
                }
        }
}

void RBMLocalMultinomialLayer::computeExpectation()
{
    if( expectation_is_up_to_date )
        return;

    for (int l=0; l<n_images; l++)
    {
        Mat activation_image = activation
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat expectation_image = expectation
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);

        for (int i=0; i<images_length; i+=area_length)
            for (int j=0; j<images_width; j+=area_width)
                softmax(
                    activation_image.subMat(i, j, area_length, area_width),
                    expectation_image.subMat(i, j, area_length, area_width)
                    );
    }
    expectation_is_up_to_date = true;
}
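
// Illustrative note: for a single 4x6 image with 2x3 areas, the activation
// image is tiled into 4 non-overlapping areas and an independent softmax
// is computed over each area's 6 activations, so each area's expectations
// sum to 1.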

void RBMLocalMultinomialLayer::computeExpectations()
{
    if( expectations_are_up_to_date )
        return;

    PLASSERT( expectations.width() == size
              && expectations.length() == batch_size );

    for (int k = 0; k < batch_size; k++)
        for (int l=0; l<n_images; l++)
        {
            Mat activation_image = activations(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);
            Mat expectation_image = expectations(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);

            for (int i=0; i<images_length; i+=area_length)
                for (int j=0; j<images_width; j+=area_width)
                    softmax(
                        activation_image.subMat(i, j, area_length, area_width),
                        expectation_image.subMat(i, j, area_length, area_width)
                        );
        }

    expectations_are_up_to_date = true;
}


void RBMLocalMultinomialLayer::fprop( const Vec& input, Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    // inefficient: allocates a temporary vector for input + bias
    Vec input_plus_bias = input + bias;
    for (int l=0; l<n_images; l++)
    {
        Mat input_image = input_plus_bias
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat output_image = output
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);

        for (int i=0; i<images_length; i+=area_length)
            for (int j=0; j<images_width; j+=area_width)
                softmax(
                    input_image.subMat(i, j, area_length, area_width),
                    output_image.subMat(i, j, area_length, area_width)
                    );
    }
}

///////////
// fprop //
///////////
void RBMLocalMultinomialLayer::fprop( const Vec& input, const Vec& rbm_bias,
                                      Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    PLASSERT( rbm_bias.size() == input_size );
    output.resize( output_size );

    // inefficient: allocates a temporary vector for input + rbm_bias
    Vec input_plus_bias = input + rbm_bias;
    for (int l=0; l<n_images; l++)
    {
        Mat input_image = input_plus_bias
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat output_image = output
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);

        for (int i=0; i<images_length; i+=area_length)
            for (int j=0; j<images_width; j+=area_width)
                softmax(
                    input_image.subMat(i, j, area_length, area_width),
                    output_image.subMat(i, j, area_length, area_width)
                    );
    }
}

/////////////////
// bpropUpdate //
/////////////////
void RBMLocalMultinomialLayer::bpropUpdate(const Vec& input, const Vec& output,
                                           Vec& input_gradient,
                                           const Vec& output_gradient,
                                           bool accumulate)
{
    PLASSERT( input.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( size );
        input_gradient.clear();
    }

    if( momentum != 0. )
        bias_inc.resize( size );

    for (int l=0; l<n_images; l++)
    {
        Mat output_image = output
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat input_grad_image = input_gradient
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat output_grad_image = output_gradient
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat bias_image = bias
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat bias_inc_image;
        if (momentum != 0)
            bias_inc_image = bias_inc
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);

        for (int i=0; i<images_length; i+=area_length)
            for (int j=0; j<images_width; j+=area_width)
            {
                Mat output_area = output_image
                    .subMat(i, j, area_length, area_width);
                Mat input_grad_area = input_grad_image
                    .subMat(i, j, area_length, area_width);
                Mat output_grad_area = output_grad_image
                    .subMat(i, j, area_length, area_width);
                Mat bias_area = bias_image
                    .subMat(i, j, area_length, area_width);
                Mat bias_inc_area;
                if (momentum != 0)
                    bias_inc_area = bias_inc_image
                        .subMat(i, j, area_length, area_width);

                real outga_dot_outa = dot(output_grad_area, output_area);

                TMatElementIterator<real> pog = output_grad_area.begin();
                TMatElementIterator<real> po = output_area.begin();
                TMatElementIterator<real> pig = input_grad_area.begin();
                TMatElementIterator<real> pb = bias_area.begin();

                // Only dereferenced when momentum != 0; when momentum == 0,
                // bias_inc_area is empty and this iterator is unused.
                TMatElementIterator<real> pbi = bias_inc_area.begin();
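
                // Gradient of the cost C through the area's softmax: with
                // y = softmax(a), dy_k/da_m = y_k * (delta_km - y_m), so
                //   dC/da_m = y_m * (dC/dy_m - sum_k dC/dy_k * y_k),
                // where the sum_k term is outga_dot_outa above.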
                for (int m=0; m<area_size; m++, pog++, po++, pig++, pb++)
                {
                    real inga_m = (*pog - outga_dot_outa) * (*po);
                    *pig += inga_m;

                    if (momentum == 0)
                    {
                        // update the bias: bias -= learning_rate * input_grad
                        *pb -= learning_rate * (*pig);
                    }
                    else
                    {
                        // The update rule becomes:
                        // bias_inc = momentum * bias_inc
                        //            - learning_rate * input_grad
                        *pbi = momentum * (*pbi) - learning_rate * (*pig);
                        *pb += *pbi;
                        pbi++;
                    }
                }
            }
    }
}

void RBMLocalMultinomialLayer::bpropUpdate(const Mat& inputs,
                                           const Mat& outputs,
                                           Mat& input_gradients,
                                           const Mat& output_gradients,
                                           bool accumulate)
{
    PLASSERT( inputs.width() == size );
    PLASSERT( outputs.width() == size );
    PLASSERT( output_gradients.width() == size );

    int mbatch_size = inputs.length();
    PLASSERT( outputs.length() == mbatch_size );
    PLASSERT( output_gradients.length() == mbatch_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == size &&
                input_gradients.length() == inputs.length(),
                "Cannot resize input_gradient and accumulate into it." );
    }
    else
    {
        input_gradients.resize(inputs.length(), size);
        input_gradients.clear();
    }

    if( momentum != 0. )
        bias_inc.resize( size );

    // TODO see if we can have a speed-up by reorganizing the different steps

    // input_gradients[k][i] =
    //   (output_gradients[k][i]-output_gradients[k].outputs[k]) outputs[k][i]
    real mean_lr = learning_rate / mbatch_size;
    for (int l=0; l<n_images; l++)
    {
        Mat bias_image = bias
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat bias_inc_image;
        if (momentum != 0)
            bias_inc_image = bias_inc
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);

        for( int k=0; k<mbatch_size; k++ )
        {
            Mat output_image = outputs(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);
            Mat input_grad_image = input_gradients(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);
            Mat output_grad_image = output_gradients(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);

            for (int i=0; i<images_length; i+=area_length)
                for (int j=0; j<images_width; j+=area_width)
                {
                    Mat output_area = output_image
                        .subMat(i, j, area_length, area_width);
                    Mat input_grad_area = input_grad_image
                        .subMat(i, j, area_length, area_width);
                    Mat output_grad_area = output_grad_image
                        .subMat(i, j, area_length, area_width);
                    Mat bias_area = bias_image
                        .subMat(i, j, area_length, area_width);
                    Mat bias_inc_area;
                    if (momentum != 0)
                        bias_inc_area = bias_inc_image
                            .subMat(i, j, area_length, area_width);

                    real outga_dot_outa = dot(output_grad_area, output_area);

                    TMatElementIterator<real> pog = output_grad_area.begin();
                    TMatElementIterator<real> po = output_area.begin();
                    TMatElementIterator<real> pig = input_grad_area.begin();
                    TMatElementIterator<real> pb = bias_area.begin();

                    if (momentum == 0)
                    {
                        // "m" renamed from "i", which shadowed the area row
                        // index of the enclosing loop.
                        for (int m=0; m<area_size; m++, pog++, po++, pig++,
                                                   pb++)
                        {
                            real inga_m = (*pog - outga_dot_outa) * (*po);
                            *pig += inga_m;

                            // update the bias, averaging the learning rate
                            // over the mini-batch:
                            // bias -= (learning_rate/mbatch_size)*input_grad
                            *pb -= mean_lr * (*pig);
                        }
                    }
                    else
                        PLCHECK_MSG(false,
                                    "Momentum and mini-batch not implemented");
                }
        }
    }
}

void RBMLocalMultinomialLayer::bpropUpdate(const Vec& input,
                                           const Vec& rbm_bias,
                                           const Vec& output,
                                           Vec& input_gradient,
                                           Vec& rbm_bias_gradient,
                                           const Vec& output_gradient)
{
    PLASSERT( input.size() == size );
    PLASSERT( rbm_bias.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );
    input_gradient.resize( size );
    input_gradient.clear(); // the loop below accumulates into it with "+="
    rbm_bias_gradient.resize( size );

    for (int l=0; l<n_images; l++)
    {
        Mat output_image = output
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat input_grad_image = input_gradient
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat output_grad_image = output_gradient
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat rbm_bias_image = rbm_bias
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);

        for (int i=0; i<images_length; i+=area_length)
            for (int j=0; j<images_width; j+=area_width)
            {
                Mat output_area = output_image
                    .subMat(i, j, area_length, area_width);
                Mat input_grad_area = input_grad_image
                    .subMat(i, j, area_length, area_width);
                Mat output_grad_area = output_grad_image
                    .subMat(i, j, area_length, area_width);
                Mat rbm_bias_area = rbm_bias_image
                    .subMat(i, j, area_length, area_width);

                real outga_dot_outa = dot(output_grad_area, output_area);

                TMatElementIterator<real> pog = output_grad_area.begin();
                TMatElementIterator<real> po = output_area.begin();
                TMatElementIterator<real> pig = input_grad_area.begin();
                TMatElementIterator<real> prb = rbm_bias_area.begin();

                for (int m=0; m<area_size; m++, pog++, po++, pig++, prb++)
                {
                    real inga_m = (*pog - outga_dot_outa) * (*po);
                    *pig += inga_m;

                    // update the bias: bias -= learning_rate * input_grad
                    *prb -= learning_rate * (*pig);
                }
            }
    }

    rbm_bias_gradient << input_gradient;
}

//////////////
// fpropNLL //
//////////////
real RBMLocalMultinomialLayer::fpropNLL(const Vec& target)
{
    computeExpectation();

    PLASSERT( target.size() == input_size );

    real nll = 0;
    for (int l=0; l<n_images; l++)
    {
        Mat target_image = target
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);
        Mat expectation_image = expectation
            .subVec(l*images_size, images_size)
            .toMat(images_length, images_width);

        for (int i=0; i<images_length; i+=area_length)
            for (int j=0; j<images_width; j+= area_width)
            {
                Mat target_area = target_image
                    .subMat(i, j, area_length, area_width);
                Mat expectation_area = expectation_image
                    .subMat(i, j, area_length, area_width);

#ifdef BOUNDCHECK
                if (!target_area.hasMissing())
                {
                    PLASSERT_MSG( min(target_area) >= 0.,
                                  "Elements of \"target_area\" should be"
                                  " non-negative" );
                    // Ensure the distribution probabilities sum to 1. We
                    // relax the default tolerance a bit, as probabilities
                    // computed from exponentials may suffer numerical
                    // imprecision.
                    if (!is_equal( sum(target_area), 1., 1., 1e-5, 1e-5 ))
                        PLERROR("In RBMLocalMultinomialLayer::fpropNLL -"
                                " Elements of \"target_area\" should sum to 1"
                                " (found a sum = %f)",
                                sum(target_area));
                }
#endif
                TMatElementIterator<real> p_tgt = target_area.begin();
                TMatElementIterator<real> p_exp = expectation_area.begin();
                for (int m=0; m<area_size; m++, p_tgt++, p_exp++)
                {
                    if (!fast_exact_is_equal(*p_tgt, 0))
                        nll -= *p_tgt * pl_log(*p_exp);
                }
            }
    }
    return nll;
}
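
// Illustrative note: per area this is the cross-entropy
// -sum_m t_m * log(p_m) between the target distribution t (typically
// one-hot) and the softmax expectation p, summed over all areas.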

void RBMLocalMultinomialLayer::fpropNLL(const Mat& targets, const Mat& costs_column)
{
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );

    for (int k=0; k<batch_size; k++) // loop over minibatch
    {
        real nll = 0;
        for (int l=0; l<n_images; l++)
        {
            Mat target_image = targets(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);
            Mat expectation_image = expectations(k)
                .subVec(l*images_size, images_size)
                .toMat(images_length, images_width);

            for (int i=0; i<images_length; i+=area_length)
                for (int j=0; j<images_width; j+= area_width)
                {
                    Mat target_area = target_image
                        .subMat(i, j, area_length, area_width);
                    Mat expectation_area = expectation_image
                        .subMat(i, j, area_length, area_width);

#ifdef BOUNDCHECK
                    if (!target_area.hasMissing())
                    {
                        PLASSERT_MSG( min(target_area) >= 0.,
                                      "Elements of \"target_area\" should be"
                                      " non-negative" );
                        // Ensure the distribution probabilities sum to 1.
                        // We relax the default tolerance a bit, as
                        // probabilities computed from exponentials may
                        // suffer numerical imprecision.
                        if (!is_equal( sum(target_area), 1., 1., 1e-5, 1e-5 ))
                            PLERROR("In RBMLocalMultinomialLayer::fpropNLL -"
                                    " Elements of \"target_area\" should sum"
                                    " to 1 (found a sum = %f) at row %d",
                                    sum(target_area), k);
                    }
#endif
                    TMatElementIterator<real> p_tgt = target_area.begin();
                    TMatElementIterator<real> p_exp = expectation_area.begin();
                    for (int m=0; m<area_size; m++, p_tgt++, p_exp++)
                    {
                        if (!fast_exact_is_equal(*p_tgt, 0))
                            nll -= *p_tgt * pl_log(*p_exp);
                    }
                }
        }
        costs_column(k, 0) = nll;
    }
}

void RBMLocalMultinomialLayer::bpropNLL(const Vec& target, real nll,
                                        Vec& bias_gradient)
{
    computeExpectation();

    PLASSERT( target.size() == input_size );
    bias_gradient.resize( size );

    // bias_gradient = expectation - target
    substract(expectation, target, bias_gradient);
}
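
// Illustrative note: combining each local softmax with the cross-entropy
// NLL above, the gradient wrt each activation simplifies to
// d(NLL)/da_m = p_m - t_m, i.e. exactly expectation - target.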

void RBMLocalMultinomialLayer::bpropNLL(const Mat& targets, const Mat& costs_column,
                                        Mat& bias_gradients)
{
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );
    bias_gradients.resize( batch_size, size );

    // bias_gradients = expectations - targets
    substract(expectations, targets, bias_gradients);
}

void RBMLocalMultinomialLayer::declareOptions(OptionList& ol)
{
    declareOption(ol, "n_images", &RBMLocalMultinomialLayer::n_images,
                  OptionBase::buildoption,
                  "Number of images in the layer.");

    declareOption(ol, "images_length",
                  &RBMLocalMultinomialLayer::images_length,
                  OptionBase::buildoption,
                  "Length of the images.");

    declareOption(ol, "images_width",
                  &RBMLocalMultinomialLayer::images_width,
                  OptionBase::buildoption,
                  "Width of the images.");

    declareOption(ol, "images_size",
                  &RBMLocalMultinomialLayer::images_size,
                  OptionBase::learntoption,
                  "images_width × images_length.");

    declareOption(ol, "area_length",
                  &RBMLocalMultinomialLayer::area_length,
                  OptionBase::buildoption,
                  "Length of the areas over which the multinomial is set.");

    declareOption(ol, "area_width",
                  &RBMLocalMultinomialLayer::area_width,
                  OptionBase::buildoption,
                  "Width of the areas over which the multinomial is set.");

    declareOption(ol, "area_size",
                  &RBMLocalMultinomialLayer::area_size,
                  OptionBase::learntoption,
                  "area_width × area_length.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    // "size" is computed in build_(), so it is redeclared as a learnt
    // option rather than a build option.
    redeclareOption(ol, "size",
                  &RBMLocalMultinomialLayer::size,
                  OptionBase::learntoption,
                  "n_images × images_width × images_length.");
}

void RBMLocalMultinomialLayer::build_()
{
    PLCHECK_MSG(images_length % area_length == 0,
                "\"images_length\" should be a multiple of \"area_length\"");
    PLCHECK_MSG(images_width % area_width == 0,
                "\"images_width\" should be a multiple of \"area_width\"");

    images_size = images_length * images_width;
    area_size = area_length * area_width;
    size = images_size * n_images;
    n_areas = size / area_size;
}
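
// Illustrative example: with n_images = 2 images of size 12x12 and areas
// of size 3x4, build_() sets images_size = 144, area_size = 12,
// size = 288, and n_areas = 24.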

void RBMLocalMultinomialLayer::build()
{
    inherited::build();
    build_();
}


void RBMLocalMultinomialLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

real RBMLocalMultinomialLayer::energy(const Vec& unit_values) const
{
    return -dot(unit_values, bias);
}


real RBMLocalMultinomialLayer::freeEnergyContribution(
    const Vec& activation_values) const
{
    PLASSERT( activation_values.size() == size );

    // result =
    //  -\sum_{i=0}^{n_areas-1} log(\sum_{j=0}^{area_size-1} exp(a_{ij}))
    real result = 0;
    Mat activation_images = activation_values
        .toMat(n_images*images_length, images_width);
    for (int i=0; i<n_areas; i++)
    {
        // There are images_width/area_width areas per band of rows, so
        // area i starts at row ((i*area_width)/images_width)*area_length.
        // (The original used (i/images_width)*area_length, which is only
        // correct when area_width == 1.)
        Mat activation_area = activation_images
            .subMat(((i*area_width) / images_width) * area_length,
                    (i*area_width) % images_width,
                    area_length,
                    area_width);

        result -= logadd(activation_area);
    }
    return result;
}
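
// Illustrative note: each area contributes log(sum_j exp(a_ij)) because
// exactly one of its units is active in any configuration, so summing
// exp(activation) over the area's states reduces to the element-wise
// exponentials, whence the logadd term per area above.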

int RBMLocalMultinomialLayer::getConfigurationCount()
{
    real approx_count = pow(real(area_size), n_areas);
    int count = 1;
    // Guard against int overflow: the original compared against 1e30,
    // far beyond what an int can hold.
    if (approx_count > 1e9)
        count = INFINITE_CONFIGURATIONS;
    else
        for (int i=0; i<n_areas; i++)
            count *= area_size;

    return count;
}

void RBMLocalMultinomialLayer::getConfiguration(int conf_index, Vec& output)
{
    PLASSERT( output.length() == size );
    PLASSERT( conf_index >= 0 && conf_index < getConfigurationCount() );

    output.clear();
    Mat output_images = output.toMat(n_images*images_length, images_width);
    for (int i=0; i<n_areas; i++)
    {
        // Decode conf_index in mixed radix: one base-area_size digit
        // per area.
        int area_conf_index = conf_index % area_size;
        conf_index /= area_size;

        // Same area indexing fix as in freeEnergyContribution().
        Mat output_area = output_images
            .subMat(((i*area_width) / images_width) * area_length,
                    (i*area_width) % images_width,
                    area_length,
                    area_width );

        output_area(area_conf_index/area_width, area_conf_index%area_width)=1;
    }
}
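
// Illustrative example: with 3 areas of area_size 4, conf_index 27
// decomposes as 27 = 3 + 4*(2 + 4*1), so areas 0, 1 and 2 get their
// units at positions 3, 2 and 1 set to 1, respectively.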


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :