PLearn 0.1
TransformationLearner.cc
00001     // -*- C++ -*-
00002 
00003 
00004 // TransformationLearner.cc
00005 //
00006 // Copyright (C) 2007 Lysiane Bouchard
00007 //
00008 // Redistribution and use in source and binary forms, with or without
00009 // modification, are permitted provided that the following conditions are met:
00010 //
00011 //  1. Redistributions of source code must retain the above copyright
00012 //     notice, this list of conditions and the following disclaimer.
00013 //
00014 //  2. Redistributions in binary form must reproduce the above copyright
00015 //     notice, this list of conditions and the following disclaimer in the
00016 //     documentation and/or other materials provided with the distribution.
00017 //
00018 //  3. The name of the authors may not be used to endorse or promote
00019 //     products derived from this software without specific prior written
00020 //     permission.
00021 //
00022 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00023 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00024 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00025 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00026 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00027 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00028 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00029 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00030 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00031 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00032 //
00033 // This file is part of the PLearn library. For more information on the PLearn
00034 // library, go to the PLearn Web site at www.plearn.org
00035 
00036 // Authors: Lysiane Bouchard
00037 
00041 #include "TransformationLearner.h"
00042 
00043 namespace PLearn {
00044 using namespace std;
00045 
00046 PLEARN_IMPLEMENT_OBJECT(
00047     TransformationLearner,
00048     "ONE LINE DESCR",
00049     "NO HELP"
00050 );
00051 
00053 // TransformationLearner //
00055 TransformationLearner::TransformationLearner():
00056     behavior(BEHAVIOR_LEARNER),
00057     minimumProba(0.0001),
00058     transformFamily(TRANSFORM_FAMILY_LINEAR_INCREMENT),
00059     withBias(false),
00060     learnNoiseVariance(false),
00061     regOnNoiseVariance(false),
00062     learnTransformDistribution(false),
00063     regOnTransformDistribution(false),
00064     emphasisOnDiversity(false),
00065     diversityFactor(0),
00066     initializationMode(INIT_MODE_DEFAULT),
00067     largeEStepAPeriod(UNDEFINED),
00068     largeEStepAOffset(UNDEFINED),
00069     largeEStepBPeriod(UNDEFINED),
00070     largeEStepBOffset(UNDEFINED),
00071     noiseVariancePeriod(UNDEFINED),
00072     noiseVarianceOffset(UNDEFINED),
00073     noiseAlpha(NOISE_ALPHA_NO_REG),
00074     noiseBeta(NOISE_BETA_NO_REG),
00075     transformDistributionPeriod(UNDEFINED),
00076     transformDistributionOffset(UNDEFINED),
00077     transformDistributionAlpha(TRANSFORM_DISTRIBUTION_ALPHA_NO_REG),
00078     transformsPeriod(UNDEFINED),
00079     transformsOffset(UNDEFINED),
00080     biasPeriod(UNDEFINED),
00081     biasOffset(UNDEFINED),
00082     noiseVariance(UNDEFINED),
00083     transformsVariance(1.0),
00084     nbTransforms(2),
00085     nbNeighbors(2)
00086 {
00087 }
00088 
00089 
00091 // declareOptions //
00093 void TransformationLearner::declareOptions(OptionList& ol)
00094 {
00095     // ### Declare all of this object's options here.
00096     // ### For the "flags" of each option, you should typically specify
00097     // ### one of OptionBase::buildoption, OptionBase::learntoption or
00098     // ### OptionBase::tuningoption. If you don't provide one of these three,
00099     // ### this option will be ignored when loading values from a script.
00100     // ### You can also combine flags, for example with OptionBase::nosave:
00101     // ### (OptionBase::buildoption | OptionBase::nosave)
00102 
00103     // ### ex:
00104     // declareOption(ol, "myoption", &TransformationLearner::myoption,
00105     //               OptionBase::buildoption,
00106     //               "Help text describing this option");
00107     // ...
00108 
00109 
00110     //buildoption
00111   
00112 
00113     declareOption(ol,
00114                   "behavior",
00115                   &TransformationLearner::behavior,
00116                   OptionBase::buildoption,
00117                   "a TransformationLearner can behave either as a learner or as a generator");
00118     declareOption(ol,
00119                   "minimumProba",
00120                   &TransformationLearner::minimumProba,
00121                   OptionBase::buildoption,
00122                   "minimum probability, used as an initial weight where one is needed");
00123     declareOption(ol,
00124                   "transformFamily",
00125                   &TransformationLearner::transformFamily,
00126                   OptionBase::buildoption,
00127                   "global form of the transformation functions");
00128     declareOption(ol,
00129                   "withBias",
00130                   &TransformationLearner::withBias,
00131                   OptionBase::buildoption,
00132                   "yes/no: add a bias term to the transformation functions?");
00133     declareOption(ol,
00134                   "learnNoiseVariance",
00135                   &TransformationLearner::learnNoiseVariance,
00136                   OptionBase::buildoption,
00137                   "is the noise variance fixed or learned?");
00138     declareOption(ol,
00139                   "regOnNoiseVariance",
00140                   &TransformationLearner::regOnNoiseVariance,
00141                   OptionBase::buildoption,
00142                   "yes/no: prior assumptions on the noise variance?");
00143     declareOption(ol,
00144                   "learnTransformDistribution",
00145                   &TransformationLearner::learnTransformDistribution,
00146                   OptionBase::buildoption,
00147                   "is the transformation distribution fixed or learned?");
00148     declareOption(ol,
00149                   "regOnTransformDistribution",
00150                   &TransformationLearner::regOnTransformDistribution,
00151                   OptionBase::buildoption,
00152                   "yes/no: prior assumptions on the transformation distribution ?");
00153     
00154     declareOption(ol,
00155                   "emphasisOnDiversity",
00156                   &TransformationLearner::emphasisOnDiversity,
00157                   OptionBase::buildoption,
00158                   "increases the probability of a set of transformations when they are more diverse \n"
00159                   "note: -the learning process is changed:\n"
00160                   "       the transformation functions can no longer all be updated at the same time \n"
00161                   "      -we assume no bias is added to the transformation functions \n");
00162 
00163     declareOption(ol,
00164                   "diversityFactor",
00165                   &TransformationLearner::diversityFactor,
00166                   OptionBase::buildoption,
00167                   "positive real number: high value  gives  high importance to diversity among transformations \n"
00168                   "(has an effect only if the boolean 'emphasisOnDiversity' is set to True)\n");
00169     declareOption(ol,
00170                   "initializationMode",
00171                   &TransformationLearner::initializationMode,
00172                   OptionBase::buildoption,
00173                   "how are the initial values of the parameters to learn chosen?");
00174     
00175     declareOption(ol,
00176                   "largeEStepAPeriod",
00177                   &TransformationLearner::largeEStepAPeriod,
00178                   OptionBase::buildoption,
00179                   "time interval between two updates of the reconstruction set\n"
00180                   "(version A, method largeEStepA())");
00181     declareOption(ol,
00182                   "largeEStepAOffset",
00183                   &TransformationLearner::largeEStepAOffset,
00184                   OptionBase::buildoption,
00185                   "time of the first update of the reconstruction set\n"
00186                   "(version A, method largeEStepA())");
00187     declareOption(ol,
00188                   "largeEStepBPeriod",
00189                   &TransformationLearner::largeEStepBPeriod,
00190                   OptionBase::buildoption,
00191                   "time interval between two updates of the reconstruction set\n"
00192                   "(version  B, method largeEStepB())"); 
00193     declareOption(ol,
00194                   "noiseVariancePeriod",
00195                   &TransformationLearner::noiseVariancePeriod,
00196                   OptionBase::buildoption,
00197                   "time interval between two updates of the noise variance");
00198     declareOption(ol,
00199                   "noiseVarianceOffset",
00200                   &TransformationLearner::noiseVarianceOffset,
00201                   OptionBase::buildoption,
00202                   "time of the first update of the noise variance");
00203     declareOption(ol,
00204                   "noiseAlpha",
00205                   &TransformationLearner::noiseAlpha,
00206                   OptionBase::buildoption,
00207                   "parameter of the prior distribution of the noise variance");
00208    declareOption(ol,
00209                  "noiseBeta",
00210                  &TransformationLearner::noiseBeta,
00211                  OptionBase::buildoption,
00212                  "parameter of the prior distribution of the noise variance");
00213    declareOption(ol,
00214                  "transformDistributionPeriod",
00215                  &TransformationLearner::transformDistributionPeriod,
00216                  OptionBase::buildoption,
00217                  "time interval between two updates of the transformation distribution");
00218    declareOption(ol, 
00219                  "transformDistributionOffset",
00220                  &TransformationLearner::transformDistributionOffset,
00221                  OptionBase::buildoption,
00222                  "time of the first update of the transformation distribution");
00223    declareOption(ol, 
00224                  "transformDistributionAlpha",
00225                  &TransformationLearner::transformDistributionAlpha,
00226                  OptionBase::buildoption,
00227                  "parameter of the prior distribution of the transformation distribution");
00228    declareOption(ol,
00229                  "transformsPeriod",
00230                  &TransformationLearner::transformsPeriod,
00231                  OptionBase::buildoption,
00232                  "time interval between two updates of the transformations matrices");
00233    declareOption(ol,
00234                  "transformsOffset",
00235                  &TransformationLearner::transformsOffset,
00236                  OptionBase::buildoption,
00237                  "time of the first update of the transformations matrices");
00238 
00239    declareOption(ol,
00240                  "biasPeriod",
00241                  &TransformationLearner::biasPeriod,
00242                  OptionBase::buildoption,
00243                  "time interval between two updates of the transformations bias");
00244    declareOption(ol,
00245                  "biasOffset",
00246                  &TransformationLearner::biasOffset,
00247                  OptionBase::buildoption,
00248                  "time of the first update of the transformations bias");
00249 
00250    declareOption(ol, 
00251                  "noiseVariance",
00252                  &TransformationLearner::noiseVariance,
00253                  OptionBase::buildoption,
00254                  "noise variance (noise = random variable normally distributed)");
00255    declareOption(ol, 
00256                  "transformsVariance",
00257                  &TransformationLearner::transformsVariance,
00258                  OptionBase::buildoption,
00259                  "variance on the transformation parameters (normally distributed)");
00260    declareOption(ol, 
00261                  "nbTransforms",
00262                  &TransformationLearner::nbTransforms,
00263                  OptionBase::buildoption,
00264                  "how many transformations?");
00265    declareOption(ol, 
00266                  "nbNeighbors",
00267                  &TransformationLearner::nbNeighbors,
00268                  OptionBase::buildoption,
00269                  "how many neighbors?");
00270    declareOption(ol, 
00271                  "transformDistribution",
00272                  &TransformationLearner::transformDistribution,
00273                  OptionBase::buildoption,
00274                  "transformation distribution");
00275    
00276    //learntoption
00277    declareOption(ol,
00278                  "train_set",
00279                  &TransformationLearner::train_set,
00280                  OptionBase::learntoption,
00281                  "We remember the training set, as this is a memory-based distribution." );
00282    declareOption(ol,
00283                  "transformsSet",
00284                  &TransformationLearner::transformsSet,
00285                  OptionBase::learntoption,
00286                  "set of transformations\n"
00287                  "implemented as an (m*d) x d matrix,\n"
00288                  "     where m is the number of transformations\n"
00289                  "       and d is the dimensionality of the input space");
00290    declareOption(ol,
00291                  "transforms",
00292                  &TransformationLearner::transforms,
00293                  OptionBase::learntoption,
00294                  "set of transformations\n"
00295                  "vector form of the previous set:\n"
00296                  "    kth element of the vector = view on the kth sub-matrix");
00297    declareOption(ol,
00298                  "biasSet",
00299                  &TransformationLearner::biasSet,
00300                  OptionBase::learntoption,
00301                  "set of biases (one per transformation)");
00302    declareOption(ol,
00303                  "inputSpaceDim",
00304                  &TransformationLearner::inputSpaceDim,
00305                  OptionBase::learntoption,
00306                  "dimensionality of the input space");
00307    
00308    declareOption(ol,
00309                  "reconstructionSet",
00310                  &TransformationLearner::reconstructionSet,
00311                  OptionBase::learntoption,
00312                  "set of weighted reconstruction candidates");
00313  
00314    // Now call the parent class' declareOptions().
00315    inherited::declareOptions(ol);
00316 }
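
// A minimal usage sketch of the options declared above (an illustration, not part
// of the class): it assumes the build options can be set as strings through the
// generic Object::setOption() interface and that a training VMat is available;
// the option names are exactly those declared above.
//
//     PP<TransformationLearner> tl = new TransformationLearner();
//     tl->setOption("nbTransforms", "4");        // learn 4 transformation functions
//     tl->setOption("nbNeighbors", "3");         // keep 3 neighbors per target
//     tl->setOption("withBias", "1");            // add a bias term to each transformation
//     tl->setOption("learnNoiseVariance", "1");  // update the noise variance in the M-steps
//     tl->setTrainingSet(some_train_set);        // hypothetical VMat of training points
//     tl->build();
//     tl->train();
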
00317 
00318 void TransformationLearner::declareMethods(RemoteMethodMap& rmm){
00319 
00320 
00321 
00322     rmm.inherited(inherited::_getRemoteMethodMap_());
00323     
00324     declareMethod(rmm, 
00325                   "initTransformsParameters",
00326                   &TransformationLearner::initTransformsParameters,
00327                   (BodyDoc("initializes the transformation parameters randomly \n"
00328                            "  (all parameters are a priori independent and normally distributed)")));
00329    
00330     declareMethod(rmm, 
00331                   "setTransformsParameters",
00332                   &TransformationLearner::setTransformsParameters,
00333                   (BodyDoc("initializes the transformation parameters with the given values"),
00334                    ArgDoc("TVec<Mat> transforms", "initial transformation matrices"),
00335                    ArgDoc("Mat  biasSet","initial biases (one per transformation) (optional)")));
00336     declareMethod(rmm, 
00337                   "initNoiseVariance",
00338                   &TransformationLearner::initNoiseVariance,
00339                   (BodyDoc("initializes the noise variance randomly (gamma distribution)")));
00340     declareMethod(rmm, 
00341                   "setNoiseVariance",
00342                   &TransformationLearner::setNoiseVariance,
00343                   (BodyDoc("initializes the noise variance to the given value"),
00344                    ArgDoc("real nv","noise variance")));
00345     declareMethod(rmm, 
00346                   "initTransformDistribution",
00347                   &TransformationLearner::initTransformDistribution,
00348                   (BodyDoc("initializes the transformation distribution randomly \n"
00349                            "-we use a Dirichlet distribution \n"
00350                            "-we store log-probabilities instead of probabilities")));
00351     declareMethod(rmm, 
00352                   "setTransformDistribution",
00353                   &TransformationLearner::setTransformDistribution,
00354                   (BodyDoc("initializes the transformation distribution with the given values \n"
00355                            " -the given values are expected to be log-probabilities"),
00356                    ArgDoc("Vec td","initial values of the transformation distribution")));
00357     
00358     declareMethod(rmm,
00359                   "returnPredictedFrom",
00360                   &TransformationLearner::returnPredictedFrom,
00361                   (BodyDoc("generates a sample data point from a source data point and returns it \n"
00362                            " - a specific transformation is used"),
00363                    ArgDoc("const Vec source","source data point"),
00364                    ArgDoc("int transformIdx","index of the transformation (optional)"),
00365                    RetDoc("Vec")));
00366     declareMethod(rmm,
00367                   "returnGeneratedSamplesFrom",
00368                   &TransformationLearner::returnGeneratedSamplesFrom,
00369                   (BodyDoc("generates sample data points from a source data point and returns them \n"
00370                            "    -we use a specific transformation"),
00371                    ArgDoc("Vec source","source data point"),
00372                    ArgDoc("int n","number of samples"),
00373                    ArgDoc("int transformIdx", "index of the transformation (optional)"),
00374                    RetDoc("nXd matrix (one row = one sample)")));
00375     declareMethod(rmm,
00376                   "pickTransformIdx",
00377                   &TransformationLearner::pickTransformIdx,
00378                   (BodyDoc("selects a transformation randomly"),
00379                    RetDoc("int (index of the chosen transformation)")));
00380                
00381     declareMethod(rmm,
00382                   "pickNeighborIdx",
00383                   &TransformationLearner::pickNeighborIdx,
00384                   (BodyDoc("select a neighbor among the data points in the training set"),
00385                    RetDoc("int (index of the data point in the training set)")));
00386     declareMethod(rmm,
00387                   "returnTreeDataSet",
00388                   &TransformationLearner::returnTreeDataSet,
00389                   (BodyDoc("creates and returns a data set using a 'tree generation process'\n"
00390                            " see 'treeDataSet()' implementation for more details"),
00391                    ArgDoc("Vec root","data point from which all the other data points will derive (directly or indirectly)"),
00392                    ArgDoc("int deepness","depth of the tree representing the samples created"),
00393                    ArgDoc("int branchingFactor","branching factor of the tree representing the samples created"),
00394                    ArgDoc("int transformIdx", "index of the transformation to use (optional)"),
00395                    RetDoc("Mat (one row = one sample)")));
00396     declareMethod(rmm,
00397                   "returnSequenceDataSet",
00398                   &TransformationLearner::returnSequenceDataSet,
00399                   (BodyDoc("creates and returns a data set using a 'sequential procedure' \n"
00400                            "see 'sequenceDataSet()' implementation for more details"),
00401                    ArgDoc("const Vec start","data point from which all the other data points will derive (directly or indirectly)"),
00402                    ArgDoc("int n","number of sample data points to generate"),
00403                    ArgDoc("int transformIdx","index of the transformation to use (optional)"),
00404                    RetDoc("nXd matrix (one row = one sample)")));
00405     declareMethod(rmm,
00406                   "returnTrainingPoint",
00407                   &TransformationLearner::returnTrainingPoint,
00408                   (BodyDoc("returns the 'idx'th data point in the training set"),
00409                    ArgDoc("int idx","index of the data point in the training set"),
00410                    RetDoc("Vec")));
00411     declareMethod(rmm,
00412                   "returnReconstructionCandidates",
00413                   &TransformationLearner::returnReconstructionCandidates,
00414                   (BodyDoc("return all the reconstructions candidates associated to a given target"),
00415                    ArgDoc("int targetIdx","index of the target data point in the training set"),
00416                    RetDoc("TVec<ReconstructionCandidate>")));
00417     declareMethod(rmm,
00418                   "returnReconstructions",
00419                   &TransformationLearner::returnReconstructions,
00420                   (BodyDoc("returns the reconstructions of the 'targetIdx'th data point in the training set \n"
00421                            "(one reconstruction per reconstruction candidate)"),
00422                    ArgDoc("int targetIdx","index of the target data point in the training set"),
00423                    RetDoc("Mat (ith row = reconstruction associated to the ith reconstruction candidate)")));
00424     declareMethod(rmm,
00425                   "returnNeighbors",
00426                   &TransformationLearner::returnNeighbors,
00427                   (BodyDoc("returns the chosen neighbors of the target\n"
00428                            "  (one neighbor per reconstruction candidate)"),
00429                    ArgDoc("int targetIdx","index of the target in the training set"),
00430                    RetDoc("Mat (ith row = neighbor associated to the ith reconstruction candidate)")));
00431     declareMethod(rmm,
00432                   "returnTransform",
00433                   &TransformationLearner::returnTransform,
00434                   (BodyDoc("returns the parameters of the 'transformIdx'th transformation"),
00435                    ArgDoc("int transformIdx","index of the transformation"),
00436                    RetDoc("Mat")));
00437     declareMethod(rmm,
00438                   "returnAllTransforms",
00439                   &TransformationLearner::returnAllTransforms,
00440                   (BodyDoc("returns the parameters of each transformation"),
00441                    RetDoc("(m*d) x d matrix, m = number of transformations \n"
00442                           "             d = dimensionality of the input space")));
00443     
00444     declareMethod(rmm,"buildLearnedParameters",
00445                   &TransformationLearner::buildLearnedParameters,
00446                   (BodyDoc("builds the structures related to learned parameters")));
00447     declareMethod(rmm,
00448                   "generatorBuild",
00449                   &TransformationLearner::generatorBuild,
00450                   (BodyDoc("generator specific initialization operations"),
00451                    ArgDoc("int inputSpaceDim","dimensionality of the input space"),
00452                    ArgDoc("TVec<Mat> transforms_", "transformations matrices"),
00453                    ArgDoc("Mat biasSet_","transformations bias"),
00454                    ArgDoc("real noiseVariance_","noise variance"),
00455                    ArgDoc("transformDistribution_", "transformation distribution")));
00456     declareMethod(rmm,
00457                   "gamma_sample",
00458                   &TransformationLearner::gamma_sample,
00459                   (BodyDoc("returns a pseudo-random positive real value using the distribution p(x)=Gamma(x |alpha,beta)"),
00460                    ArgDoc("real alpha",">=1"),
00461                    ArgDoc("real beta",">= 0 (optional: default value==1)"),
00462                    RetDoc("real >=0")));
00463     declareMethod(rmm,
00464                   "return_dirichlet_sample",
00465                   &TransformationLearner::return_dirichlet_sample,
00466                   (BodyDoc("returns a pseudo-random positive real vector using the distribution p(x)=Dirichlet(x|alpha)"),
00467                    ArgDoc("real alpha","all the parameters of the distribution are equal to 'alpha'"),
00468                    RetDoc("Vec (each element is between 0 and 1 , the elements sum to one)")));
00469 /* declareMethod(rmm,
00470    "return_dirichlet_sample",
00471    &TransformationLearner::return_dirichlet_sample,
00472    (BodyDoc("returns a pseudo-random positive real vector using the distribution p(x)=Dirichlet(x|alphas)"),
00473    ArgDoc("Vec alphas","parameters of the distribution"),
00474    RetDoc("Vec (each element is between 0 and 1, the elements sum to one )"))); */
00475     declareMethod(rmm,
00476                   "initEStep",
00477                   &TransformationLearner::initEStep,
00478                   (BodyDoc("initial expectation step")));
00479     declareMethod(rmm,
00480                   "EStep",
00481                   &TransformationLearner::EStep,
00482                   (BodyDoc("coordination of the different kinds of expectation steps")));
00483     declareMethod(rmm,
00484                   "largeEStepA",
00485                   &TransformationLearner::largeEStepA,
00486                   (BodyDoc("updates the reconstruction set \n"
00487                            "for each target, keep the most probable <neighbor, transformation> pairs")));
00488     declareMethod(rmm,
00489                   "largeEStepB",
00490                   &TransformationLearner::largeEStepB,
00491                   (BodyDoc("updates the reconstruction set \n"
00492                            "for each <target, transformation> pair, choose the most probable neighbors")));
00493     declareMethod(rmm,
00494                   "smallEStep",
00495                   &TransformationLearner::smallEStep,
00496                   (BodyDoc("update the weights of the reconstruction candidates")));
00497     declareMethod(rmm,
00498                   "MStep",
00499                   &TransformationLearner::MStep,
00500                   (BodyDoc("coordination of the different kinds of maximization steps")));
00501     declareMethod(rmm,
00502                   "MStepTransformDistribution",
00503                   &TransformationLearner::MStepTransformDistribution,
00504                   (BodyDoc("maximization step with respect to transformation distribution parameters")));
00505     declareMethod(rmm,
00506                   "MStepTransformations",
00507                   &TransformationLearner::MStepTransformations,
00508                   (BodyDoc("maximization step with respect to transformation matrices (MAP version)")));
00509     declareMethod(rmm,
00510                   "MStepTransformationDiv",
00511                   &TransformationLearner::MStepTransformationDiv,
00512                   (BodyDoc("maximization step with respect to a specific transformation matrix (MAP version + emphasis on diversity)"),
00513                    ArgDoc("int transformIdx","index of the transformation matrix to optimize")));
00514     declareMethod(rmm,
00515                   "MStepBias",
00516                   &TransformationLearner::MStepBias,
00517                   (BodyDoc("maximization step with respect to transformation bias (MAP version)")));
00518     declareMethod(rmm,
00519                   "MStepNoiseVariance",
00520                   &TransformationLearner::MStepNoiseVariance,
00521                   (BodyDoc("maximization step with respect to noise variance")));
00522     declareMethod(rmm,
00523                   "nextStage",
00524                   &TransformationLearner::nextStage,
00525                   (BodyDoc("increment 'stage' by one")));
00526 
00527 }
00528 
00529 
00531 // build //
00533 void TransformationLearner::build()
00534 {
00535 
00536     // ### Nothing to add here, simply calls build_().
00537     inherited::build();
00538     build_();
00539 }
00540 
00542 // build_ //
00544 void TransformationLearner::build_()
00545 {
00546     // ### This method should do the real building of the object,
00547     // ### according to set 'options', in *any* situation.
00548     // ### Typical situations include:
00549     // ###  - Initial building of an object from a few user-specified options
00550     // ###  - Building of a "reloaded" object: i.e. from the complete set of
00551     // ###    all serialised options.
00552     // ###  - Updating or "re-building" of an object after a few "tuning"
00553     // ###    options have been modified.
00554     // ### You should assume that the parent class' build_() has already been
00555     // ### called.
00556 
00557     // ### In general, you will want to call this class' specific methods for
00558     // ### conditional distributions.
00559     // TransformationLearner::setPredictorPredictedSizes(predictor_size,
00560     //                                          predicted_size,
00561     //                                          false);
00562     // TransformationLearner::setPredictor(predictor_part, false);
00563 
00564  
00565 
00566     if(behavior == BEHAVIOR_LEARNER)
00567     {
00568         if(train_set.isNotNull())
00569         {
00570             mainLearnerBuild();
00571         }
00572      
00573     }
00574    
00575     else{
00576         generatorBuild(); //initialization of the parameters with all the default values
00577     }
00578         
00579 }
00580 
00581 // ### Remove this method if your distribution does not implement it.
00583 // forget //
00585 void TransformationLearner::forget()
00586 {
00587     
00588     
00595     //PLERROR("forget method not implemented for TransformationLearner");
00596     
00597     inherited::forget();
00598     stage = 0;
00599     build();
00600    
00601     
00602 }
00603 
00605 // generate //
00607 
00613 void TransformationLearner::generate(Vec & y) const
00614 {
00615     //PLERROR("generate not implemented for TransformationLearner");
00616     PLASSERT(y.length() == inputSpaceDim);
00617     int neighborIdx ;
00618     neighborIdx=pickNeighborIdx();
00619     Vec neighbor;
00620     neighbor.resize(inputSpaceDim);
00621     seeTrainingPoint(neighborIdx, neighbor);
00622     generatePredictedFrom(neighbor, y);
00623 }
00624 
00625 // ### Default version of inputsize returns learner->inputsize()
00626 // ### If this is not appropriate, you should uncomment this and define
00627 // ### it properly here:
00628 int TransformationLearner::inputsize() const {
00629     return inputSpaceDim;
00630 }
00631 
00632 
00633 
00634 
00636 // log_density //
00638 real TransformationLearner::log_density(const Vec& y) const
00639 {
00640     PLASSERT(y.length() == inputSpaceDim);
00641     real weight;
00642     real totalWeight = INIT_weight(0);
00643     real scalingFactor = -1*(pl_log(pow(2*Pi*noiseVariance, inputSpaceDim/2.0)) 
00644                              +
00645                              pl_log(trainingSetLength));
00646     for(int neighborIdx=0; neighborIdx<trainingSetLength; neighborIdx++){
00647         seeTrainingPoint(neighborIdx,ses_neighbor);
00648         for(int transformIdx=0 ; transformIdx<nbTransforms ; transformIdx++){
00649             weight = computeReconstructionWeight(y,
00650                                                  ses_neighbor,
00651                                                  transformIdx,
00652                                                  ses_predictedTarget);
00653             weight = MULT_weights(weight,
00654                                   transformDistribution[transformIdx]);
00655             totalWeight = SUM_weights(weight,totalWeight);
00656         }  
00657     }
00658     totalWeight = MULT_weights(totalWeight, scalingFactor);
00659     return totalWeight;
00660 }
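
// Assuming INIT_weight / MULT_weights / SUM_weights implement log-domain
// arithmetic (log p ; log(a*b) = log a + log b ; log-sum-exp) and that
// computeReconstructionWeight() returns the log of the unnormalized Gaussian
// kernel, the loop above computes the log of the mixture density
//
//     p(y) = 1/( N * (2*pi*sigma^2)^(d/2) )
//            * sum_{n=1..N} sum_{t=1..T} p(t) * exp( -||y - f_t(x_n)||^2 / (2*sigma^2) )
//
// with N = trainingSetLength, T = nbTransforms, d = inputSpaceDim,
// sigma^2 = noiseVariance, x_n the n-th training point and f_t the t-th
// transformation; scalingFactor holds the log of the leading constant.
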
00661 
00662 
00663 
00665 // makeDeepCopyFromShallowCopy //
00667 void TransformationLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00668 {
00669     inherited::makeDeepCopyFromShallowCopy(copies);
00670 
00671     // ### Call deepCopyField on all "pointer-like" fields
00672     // ### that you wish to be deepCopied rather than
00673     // ### shallow-copied.
00674     // ### ex:
00675     // deepCopyField(trainvec, copies);
00676     
00677 
00678     // ### Remove this line when you have fully implemented this method.
00679     //PLERROR("TransformationLearner::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
00680 }
00681 
00683 // resetGenerator //
00685 /*void TransformationLearner::resetGenerator(long g_seed) const
00686 {
00687     PLERROR("resetGenerator not implemented for TransformationLearner");
00688 }
00689 */
00690 
00691 // ### Remove this method, if your distribution does not implement it.
00693 // train //
00695 void TransformationLearner::train()
00696 {
00697     
00698   
00699     //PLERROR("train method not implemented for TransformationLearner");
00700     // The role of the train method is to bring the learner up to
00701     // stage==nstages, updating train_stats with training costs measured
00702     // on-line in the process.
00703 
00704     /* TYPICAL CODE:
00705 
00706     static Vec input;  // static so we don't reallocate memory each time...
00707     static Vec target; // (but be careful that static means shared!)
00708     input.resize(inputsize());    // the train_set's inputsize()
00709     target.resize(targetsize());  // the train_set's targetsize()
00710     real weight;
00711 
00712     // This generic PLearner method does a number of standard stuff useful for
00713     // (almost) any learner, and return 'false' if no training should take
00714     // place. See PLearner.h for more details.
00715     if (!initTrain())
00716         return;
00717 
00718     while(stage<nstages)
00719     {
00720         // clear statistics of previous epoch
00721         train_stats->forget();
00722 
00723         //... train for 1 stage, and update train_stats,
00724         // using train_set->getExample(input, target, weight)
00725         // and train_stats->update(train_costs)
00726 
00727         ++stage;
00728         train_stats->finalize(); // finalize statistics for this epoch
00729     }
00730     */
00731 
00732     if(stage==0) {
00733         buildLearnedParameters();
00734         initEStep(); }
00735     while(stage<nstages)
00736     {
00737         MStep();
00738         EStep();
00739         stage ++;
00740     }
00741     
00742 }
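
// Sketch of a resulting training run, assuming nstages is set as for any other
// PLearner (e.g. setOption("nstages", "3")): the first call to train()
// allocates the learned parameters and builds an initial reconstruction set,
// then each stage performs one maximization step followed by one expectation step:
//
//     stage 0 : buildLearnedParameters(); initEStep(); MStep(); EStep();
//     stage 1 : MStep(); EStep();
//     stage 2 : MStep(); EStep();
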
00743 
00744 
00745 
00746 void TransformationLearner::buildLearnedParameters(){
00747     
00748     //LEARNED PARAMETERS
00749 
00750 
00751     //set of transformations matrices
00752     transformsSet = Mat(nbTransforms * inputSpaceDim, inputSpaceDim);
00753     
00754     //view on the set of transformations (vector)
00755     //    each transformation = one matrix 
00756     transforms.resize(nbTransforms);
00757     for(int k = 0; k< nbTransforms; k++){
00758         transforms[k] = transformsSet.subMatRows(k * inputSpaceDim, inputSpaceDim);       
00759     }
00760     
00761     //set of transformations bias (optional)
00762     if(withBias){
00763         biasSet = Mat(nbTransforms,inputSpaceDim);       
00764     }
00765     else{
00766         biasSet = Mat(0,0);   
00767     }
00768 
00769     //choose an initial value for each transformation parameter  (normal distribution) 
00770     initTransformsParameters();
00771 
00772     //initialize the noise variance
00773     if(noiseVariance == UNDEFINED){
00774         if(learnNoiseVariance && regOnNoiseVariance){
00775             initNoiseVariance();
00776         }
00777         else{
00778             noiseVariance = 1.0;
00779         }
00780     }
00781 
00782     //transformDistribution
00783     if(transformDistribution.length() == 0){
00784         if(learnTransformDistribution && regOnTransformDistribution)
00785             initTransformDistribution();
00786         else{
00787             transformDistribution.resize(nbTransforms);
00788             real w = INIT_weight(1.0/nbTransforms);
00789             for(int k=0; k<nbTransforms ; k++){
00790                 transformDistribution[k] = w;
00791             }
00792         }       
00793     }
00794     else{
00795         PLASSERT(transformDistribution.length() == nbTransforms);
00796         PLASSERT(isWellDefined(transformDistribution));
00797     }
00798 
00799 
00800      //reconstruction set 
00801     reconstructionSet.resize(nbReconstructions);
00802 
00803 
00804 }
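
// Layout of the learned structures allocated above (d = inputSpaceDim):
//
//   transformsSet     : (nbTransforms*d) x d matrix; rows [k*d, (k+1)*d) hold the
//                       k-th transformation matrix, and transforms[k] is a view on them.
//   biasSet           : nbTransforms x d matrix, one bias row per transformation
//                       (empty 0x0 matrix when withBias is false).
//   reconstructionSet : flat vector of nbReconstructions weighted candidates; the
//                       candidates of target i occupy the slice
//                       [ i*nbNeighbors*nbTransforms , (i+1)*nbNeighbors*nbTransforms ).
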
00805 
00806 
00807 //INITIALIZATION METHODS 
00808 
00809 
00812 void TransformationLearner::mainLearnerBuild(){
00813 
00814     //dimension of the input space
00815     inputSpaceDim = train_set->inputsize();
00816 
00817     //some storage variables that we will re-use to save time
00818     newDistribution.resize(nbTransforms) ;
00819     ses_target.resize(inputSpaceDim);
00820     ses_neighbor.resize(inputSpaceDim);
00821     ses_predictedTarget.resize(inputSpaceDim);
00822     lg_neighbor.resize(inputSpaceDim);
00823     lg_predictedTarget.resize(inputSpaceDim);
00824     fnn_target.resize(inputSpaceDim);
00825     fnn_neighbor.resize(inputSpaceDim);
00826     fbtrc_neighbor.resize(inputSpaceDim);
00827     fbtrc_target.resize(inputSpaceDim);
00828     fbtrc_predictedTarget.resize(inputSpaceDim);
00829     fbwn_target.resize(inputSpaceDim);
00830     fbwn_neighbor.resize(inputSpaceDim);
00831     fbwn_predictedTarget.resize(inputSpaceDim);
00832     mst_v.resize(inputSpaceDim);
00833     mst_target.resize(inputSpaceDim);
00834     mst_neighbor.resize(inputSpaceDim);
00835     mst_pivots.resize(inputSpaceDim);
00836     msb_newBiasSet.resize(nbTransforms,inputSpaceDim);
00837     msb_norms.resize(nbTransforms);
00838     msb_target.resize(inputSpaceDim);
00839     msb_neighbor.resize(inputSpaceDim);
00840     msb_reconstruction.resize(inputSpaceDim);
00841     msnvMAP_total_k.resize(inputSpaceDim);
00842     msnvMAP_target.resize(inputSpaceDim);
00843     msnvMAP_neighbor.resize(inputSpaceDim);
00844     msnvMAP_reconstruction.resize(inputSpaceDim);
00845     mstd_B.resize(inputSpaceDim,inputSpaceDim);
00846     mstd_C.resize(inputSpaceDim,inputSpaceDim);
00847     mstd_D.resize(inputSpaceDim,inputSpaceDim);
00848     mstd_v.resize(inputSpaceDim);
00849     mstd_target.resize(inputSpaceDim);
00850     mstd_neighbor.resize(inputSpaceDim);
00851     mstd_pivots.resize(inputSpaceDim);
00852     
00853     //put more emphasis on diversity among transformation?
00854     if(emphasisOnDiversity){
00855         PLASSERT(!withBias);
00856         if(diversityFactor<=0){
00857             diversityFactor = 1.0/transformsVariance;  
00858         }
00859     }
00860     else{
00861         diversityFactor = 0;
00862     }
00863 
00864 
00865     int defaultPeriod = 1;
00866     int defaultTransformsOffset=0;
00867     int defaultBiasOffset=0;
00868     int defaultNoiseVarianceOffset=0;
00869     int defaultTransformDistributionOffset=0;
00870 
00871     defaultTransformsOffset = 0;
00872     
00873     if(withBias){
00874         defaultBiasOffset = defaultPeriod ;
00875         defaultPeriod++;
00876     }
00877     if(learnNoiseVariance){
00878         defaultNoiseVarianceOffset = defaultPeriod;
00879         defaultPeriod++;
00880     }
00881     if(learnTransformDistribution){
00882         defaultTransformDistributionOffset = defaultPeriod;
00883         defaultPeriod ++;
00884     }
00885     
00886     
00887     transformsSD = sqrt(transformsVariance);
00888     
00889     //DIMENSION VARIABLES
00890           
00891     //number of samples given in the training set
00892     trainingSetLength = train_set->length();
00893     
00894     
00895     //number of reconstruction candidates related to a specific target in the 
00896     //reconstruction set.   
00897     nbTargetReconstructions = nbNeighbors * nbTransforms;
00898 
00899     //total number of reconstruction candidates in the reconstruction set
00900     nbReconstructions = trainingSetLength * nbTargetReconstructions;
00901     
00902     
00903     
00904 
00905     if(withBias){
00906         if(biasPeriod == UNDEFINED || biasOffset == UNDEFINED){
00907             biasPeriod = defaultPeriod;
00908             biasOffset = defaultBiasOffset;
00909         }
00910     }
00911 
00912     else{
00913         biasPeriod = UNDEFINED ;
00914         biasOffset = UNDEFINED;
00915     }
00916 
00917  
00918 
00919    
00920     if(transformsPeriod == UNDEFINED || transformsOffset == UNDEFINED){
00921         transformsPeriod = defaultPeriod;
00922         transformsOffset = defaultTransformsOffset;
00923     }
00924 
00925     //training parameters for noise variance
00926     if(learnNoiseVariance){
00927         if(noiseVariancePeriod == UNDEFINED || noiseVarianceOffset == UNDEFINED){
00928             noiseVariancePeriod = defaultPeriod;
00929             noiseVarianceOffset = defaultNoiseVarianceOffset;
00930         }
00931         if(regOnNoiseVariance){
00932             if(noiseAlpha < 1)
00933                 noiseAlpha = 1;
00934             if(noiseBeta <= 0){
00935                 noiseBeta = 1;
00936             }
00937         }
00938         else{
00939             noiseAlpha = NOISE_ALPHA_NO_REG;
00940             noiseBeta = NOISE_BETA_NO_REG;
00941         }
00942     }
00943     else{
00944         noiseVariancePeriod = UNDEFINED;
00945         noiseVarianceOffset = UNDEFINED;
00946     }
00947     
00948  
00949     
00950      //training parameters for transformation distribution
00951      if(learnTransformDistribution){
00952          if(transformDistributionPeriod == UNDEFINED || transformDistributionOffset == UNDEFINED){
00953              transformDistributionPeriod = defaultPeriod;
00954              transformDistributionOffset = defaultTransformDistributionOffset;
00955          }
00956          if(regOnTransformDistribution){
00957              if(transformDistributionAlpha<=0){
00958                  transformDistributionAlpha = 10;
00959              }
00960          }
00961          else{
00962              transformDistributionAlpha = TRANSFORM_DISTRIBUTION_ALPHA_NO_REG;
00963          }
00964      }
00965      else{
00966          transformDistributionPeriod = UNDEFINED;
00967          transformDistributionOffset = UNDEFINED;
00968      }
00969 
00970 
00971  
00972    
00973     
00974     
00975 
00976     //OTHER VARIABLES
00977     
00978     
00979      
00980     //Storage space used in the update of the transformation parameters
00981     B_C = Mat(2 * nbTransforms * inputSpaceDim , inputSpaceDim);
00982     
00983     B.resize(nbTransforms);
00984     C.resize(nbTransforms);
00985     for(int k=0; k<nbTransforms; k++){
00986         B[k]= B_C.subMatRows(k*inputSpaceDim, inputSpaceDim);
00987     }
00988     for(int k= nbTransforms ; k<2*nbTransforms ; k++){
00989         C[(k % nbTransforms)] = B_C.subMatRows(k*inputSpaceDim, inputSpaceDim);
00990     }
00991     
00992     
00993 }
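
// Worked example of the default update schedule computed above, assuming the
// convention suggested by the *Period / *Offset names, i.e. a parameter group
// is updated at the stages where (stage - offset) % period == 0:
//
//   withBias = learnNoiseVariance = learnTransformDistribution = true
//     => defaultPeriod = 4, transformsOffset = 0, biasOffset = 1,
//        noiseVarianceOffset = 2, transformDistributionOffset = 3,
//   so successive M-steps cycle through
//   transforms -> bias -> noise variance -> transformation distribution.
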
00994 
00995 
00998 void TransformationLearner::generatorBuild( int inputSpaceDim_,
00999                                             TVec<Mat> transforms_,
01000                                             Mat biasSet_,
01001                                             real noiseVariance_,
01002                                             Vec transformDistribution_){
01003     
01004     inputSpaceDim = inputSpaceDim_;
01005     transformsSD = sqrt(transformsVariance);
01006     
01007 
01008     //transformations parameters
01009 
01010     
01011     transformsSet = Mat(nbTransforms * inputSpaceDim, inputSpaceDim);
01012     transforms.resize(nbTransforms);
01013     for(int k = 0; k< nbTransforms; k++){
01014         transforms[k] = transformsSet.subMatRows(k * inputSpaceDim, inputSpaceDim);       
01015     }
01016     
01017     if(withBias){
01018         biasSet = Mat(nbTransforms,inputSpaceDim);
01019     }
01020     else{
01021         biasSet = Mat(0,0);
01022     }
01023     if(transforms_.length() == 0){
01024         initTransformsParameters();
01025     }
01026     else{
01027         setTransformsParameters(transforms_,biasSet_);
01028     }
01029     
01030 
01031     //noise variance
01032     if(noiseAlpha < 1){
01033             noiseAlpha = 1;
01034         }
01035     if(noiseBeta <= 0){
01036         noiseBeta = 1;
01037     }
01038     if(noiseVariance_ <= 0){
01039         initNoiseVariance();
01040     }
01041     else{
01042         setNoiseVariance(noiseVariance_);
01043     }
01044     //transformation distribution
01045     if(transformDistributionAlpha <=0)
01046         transformDistributionAlpha = 10;
01047     if(transformDistribution_.length()==0){
01048         initTransformDistribution();
01049     }
01050     else{
01051         setTransformDistribution(transformDistribution_);
01052     }
01053 }
01054 
01055 
01058 void TransformationLearner::initTransformsParameters()
01059 {
01060     
01061     transformsSet .resize(nbTransforms*inputSpaceDim, inputSpaceDim);
01062     transforms.resize(nbTransforms);
01063     for(int k = 0; k< nbTransforms; k++){
01064         transforms[k] = transformsSet.subMatRows(k * inputSpaceDim, inputSpaceDim);       
01065     }
01066     for(int t=0; t<nbTransforms ; t++){
01067         random_gen->fill_random_normal(transforms[t], 0 , transformsSD);
01068     }
01069     if(withBias){
01070         biasSet = Mat(nbTransforms,inputSpaceDim);
01071         random_gen->fill_random_normal(biasSet, 0,transformsSD);
01072     }
01073     else{
01074         biasSet = Mat(0,0);
01075     }
01076     if(transformFamily == TRANSFORM_FAMILY_LINEAR){
01077         for(int t=0; t<nbTransforms;t++){
01078             addToDiagonal(transforms[t],1.0);
01079         }
01080     }
01081 }
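
// Net effect of the initialization above: every entry of a transformation
// matrix (and of its optional bias row) is drawn i.i.d. from
// N(0, transformsVariance), and for the TRANSFORM_FAMILY_LINEAR family the
// identity is then added to the diagonal, so each initial transformation is a
// small random perturbation of the identity map: T_k = I + E_k.
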
01082 
01085 void TransformationLearner::setTransformsParameters(TVec<Mat> transforms_,
01086                                                     Mat biasSet_)
01087 {
01088     
01089     PLASSERT(transforms_.length() == nbTransforms);
01090     
01091     int nbRows = inputSpaceDim*nbTransforms;
01092     transformsSet.resize(nbRows,inputSpaceDim);
01093     transforms.resize(nbTransforms);
01094     for(int k = 0; k< nbTransforms; k++){
01095         transforms[k] = transformsSet.subMatRows(k * inputSpaceDim, inputSpaceDim);       
01096     }
01097 
01098 
01099     int rowIdx = 0;
01100     for(int t=0; t<nbTransforms; t++){
01101         PLASSERT(transforms_[t].width() == inputSpaceDim);
01102         PLASSERT(transforms_[t].length() == inputSpaceDim);
01103         transformsSet.subMatRows(rowIdx,inputSpaceDim) << transforms_[t];
01104         transforms[t]= transformsSet.subMatRows(rowIdx,inputSpaceDim);
01105         rowIdx += inputSpaceDim;
01106     }
01107     if(withBias){    
01108         PLASSERT(biasSet_.length() == nbTransforms);
01109         PLASSERT(biasSet_.width() == inputSpaceDim);
01110         biasSet = Mat(nbTransforms, inputSpaceDim);
01111         biasSet << biasSet_;
01112     }
01113     else{
01114         biasSet = Mat(0,0);
01115     }
01116     
01117 
01118 }
01119 
01122 void TransformationLearner::initNoiseVariance()
01123 {
01124     real noisePrecision = gamma_sample(noiseAlpha, noiseBeta);
01125     PLASSERT(noisePrecision != 0);
01126     noiseVariance = 1.0/noisePrecision;
01127 }
01128 
01130 void TransformationLearner::setNoiseVariance(real nv)
01131 {
01132     PLASSERT(nv > 0);
01133     noiseVariance = nv;
01134 }
01135 
01136 
01139 void TransformationLearner::initTransformDistribution()
01140 {
01141     
01142     transformDistribution.resize(nbTransforms);
01143     dirichlet_sample(transformDistributionAlpha, transformDistribution);
01144     for(int i=0; i<nbTransforms ;i++){
01145         transformDistribution[i] = INIT_weight(transformDistribution[i]);
01146     } 
01147 }
01148 
01150 void TransformationLearner::setTransformDistribution(Vec td)
01151 {
01152     PLASSERT(td.length() == nbTransforms);
01153     PLASSERT(isWellDefined(td));
01154     transformDistribution.resize(nbTransforms);
01155     transformDistribution << td;
01156 }
01157 
01158 
01159 //GENERATION
01160 
01162 void TransformationLearner::generatePredictedFrom(const Vec & source,
01163                                                   Vec & sample)const
01164 {
01165     
01166     int transformIdx = pickTransformIdx();
01167     generatePredictedFrom(source, sample, transformIdx);
01168 }
01169 
01171 void TransformationLearner::generatePredictedFrom(const Vec & source,
01172                                                   Vec & sample,
01173                                                   int transformIdx)const
01174 {
01175     //TODO
01176     real noiseSD = pow(noiseVariance,0.5);
01177     int d = source.length();
01178     PLASSERT(d == inputSpaceDim);
01179     PLASSERT(sample.length() == inputSpaceDim);
01180     PLASSERT(0<= transformIdx && transformIdx<nbTransforms);
01181     
01182     //apply the transformation
01183     applyTransformationOn(transformIdx,source,sample);
01184     
01185     //add noise
01186     for(int i=0; i<d; i++){
01187         sample[i] += random_gen->gaussian_mu_sigma(0, noiseSD);
01188     } 
01189 }
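
// Generative model implemented above (the exact form of f_t, i.e. of
// applyTransformationOn(), depends on transformFamily and withBias; for a
// plain linear transformation with bias it is f_t(x) = T_t * x + b_t):
//
//     sample = f_t(source) + noise ,   noise[i] ~ N(0, noiseVariance)  i.i.d.
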
01190 
01193 Vec TransformationLearner::returnPredictedFrom(Vec source,
01194                                                int transformIdx)const
01195 {
01196     Vec sample;
01197     sample.resize(inputSpaceDim);
01198     if(transformIdx <0)
01199         generatePredictedFrom(source,sample);
01200     else
01201         generatePredictedFrom(source,sample,transformIdx);
01202     return sample;
01203 }
01204 
01206 void TransformationLearner::batchGeneratePredictedFrom(const Vec & center,
01207                                                         Mat & samples)const
01208 {
01209     PLASSERT(center.length() ==inputSpaceDim);
01210     PLASSERT(samples.width() ==inputSpaceDim);
01211     int l = samples.length();
01212     for(int i=0; i<l; i++)
01213     {
01214         Vec v = samples(i);
01215         generatePredictedFrom(center, v);
01216     }
01217 }
01218 
01221 void TransformationLearner::batchGeneratePredictedFrom(const Vec & center,
01222                                                         Mat & samples,
01223                                                         int transformIdx)const
01224 {
01225     PLASSERT(center.length() ==inputSpaceDim);
01226     PLASSERT(samples.width() ==inputSpaceDim);
01227     int l = samples.length();
01228     for(int i=0; i<l; i++)
01229     {
01230         Vec v = samples(i);
01231         generatePredictedFrom(center, v,transformIdx);
01232     }  
01233 }
01234 
01235 //Generates n samples from center and returns them stored in a matrix
01236 //    (generation process = 1) choose a transformation (*),
01237 //                          2) apply it on center
01238 //                          3) add noise)
01239 // - (*) if transformIdx>=0, we always use the corresponding transformation
01240 Mat TransformationLearner::returnGeneratedSamplesFrom(Vec center,
01241                                                       int n,
01242                                                       int transformIdx)const
01243 {
01244     Mat samples = Mat(n,inputSpaceDim);
01245     if(transformIdx<0)
01246         batchGeneratePredictedFrom(center,samples);
01247     else
01248         batchGeneratePredictedFrom(center,samples,transformIdx);
01249     return samples;
01250 }
01251 
01253 int TransformationLearner::pickTransformIdx() const
01254 {
01255     
01256     Vec probaTransformDistribution ;
01257     probaTransformDistribution.resize(nbTransforms);
01258     for(int i=0; i<nbTransforms; i++){
01259         probaTransformDistribution[i]=PROBA_weight(transformDistribution[i]);
01260     }
01261     int w= random_gen->multinomial_sample(probaTransformDistribution);
01262     return w;
01263 }
01264 
01268 int TransformationLearner::pickNeighborIdx() const
01269 {
01270     
01271     return random_gen->uniform_multinomial_sample(trainingSetLength);
01272 }
01273 
01274 
01300 void TransformationLearner::treeDataSet(const Vec & root,
01301                                         int deepness,
01302                                         int branchingFactor,
01303                                         Mat & dataPoints,
01304                                         int transformIdx)const
01305 {
01306 
01307     PLASSERT(root.length() == inputSpaceDim);
01308 
01309     //compute the number of data points to generate and resize dataPoints accordingly
01310     int nbDataPoints;
01311     if(branchingFactor == 1)
01312         nbDataPoints = deepness + 1;  
01313     else nbDataPoints = int((1- pow(1.0*branchingFactor,deepness + 1.0))
01314                             /
01315                             (1 - branchingFactor));
01316     dataPoints.resize(nbDataPoints,inputSpaceDim);
01317     
01318     //root = first element in the matrix dataPoints
01319     dataPoints(0) << root;
01320   
01321     //generate the other data points 
01322     int centerIdx=0 ;
01323     for(int dataIdx=1; dataIdx < nbDataPoints ; dataIdx+=branchingFactor){
01324         
01325         Vec v = dataPoints(centerIdx);
01326         Mat m = dataPoints.subMatRows(dataIdx, branchingFactor);
01327         if(transformIdx>=0){
01328             batchGeneratePredictedFrom(v,m,transformIdx);
01329         }
01330         else{
01331             batchGeneratePredictedFrom(v,m);
01332         } 
01333         centerIdx ++ ;
01334     }  
01335 }
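
// Size check for the tree generated above: with branching factor b > 1 and
// depth D ('deepness'), the number of rows is the geometric sum
//     nbDataPoints = 1 + b + b^2 + ... + b^D = (b^(D+1) - 1) / (b - 1),
// e.g. b = 2, D = 3 gives 15 data points (the root plus 14 generated samples);
// with b = 1 the tree degenerates into a sequence of D + 1 points.
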
01336 
01337 Mat TransformationLearner::returnTreeDataSet(Vec root,
01338                                              int deepness,
01339                                              int branchingFactor,
01340                                              int transformIdx)const
01341 {
01342     Mat dataPoints;
01343     treeDataSet(root, deepness, branchingFactor, dataPoints, transformIdx);
01344     return dataPoints;
01345 }
01346 
01347 
01351 void TransformationLearner::sequenceDataSet(const Vec & start,
01352                                             int n,
01353                                             Mat & dataPoints,
01354                                             int transformIdx)const
01355 {
01356     treeDataSet(start,n-1,1,dataPoints , transformIdx);
01357 }
01358 
01359 Mat TransformationLearner::returnSequenceDataSet(Vec start,
01360                                                  int n,
01361                                                  int transformIdx)const
01362 {
01363     Mat dataPoints;
01364     sequenceDataSet(start,n,dataPoints,transformIdx);
01365     return dataPoints;
01366 }
01367 
01368 
01369 
01370 
01372 
01373 
01375 Vec TransformationLearner::returnTrainingPoint(int idx)const
01376 {
01377     
01378     Vec v,temp;
01379     real w;
01380     v.resize(inputSpaceDim);
01381     train_set->getExample(idx, v, temp, w);
01382     return v;
01383     
01384 }
01385  
01386 
01388 TVec<ReconstructionCandidate> TransformationLearner::returnReconstructionCandidates(int targetIdx)const
01389 {
01390    
01391     int startIdx = targetIdx * nbTargetReconstructions;  
01392     return reconstructionSet.subVec(startIdx, 
01393                                     nbTargetReconstructions).copy();
01394 }
01395 
01396 
01399 Mat TransformationLearner::returnReconstructions(int targetIdx)const
01400 {
01401     Mat reconstructions = Mat(nbTargetReconstructions,inputSpaceDim);
01402     int candidateIdx = targetIdx*nbTargetReconstructions;
01403     int neighborIdx, transformIdx;
01404     for(int i=0; i<nbTargetReconstructions; i++){
01405         neighborIdx = reconstructionSet[candidateIdx].neighborIdx;
01406         transformIdx= reconstructionSet[candidateIdx].transformIdx;
01407         Vec neighbor;
01408         neighbor.resize(inputSpaceDim);
01409         seeTrainingPoint(neighborIdx, neighbor);
01410         Vec v = reconstructions(i);
01411         applyTransformationOn(transformIdx, neighbor, v);
01412         candidateIdx ++;
01413     }
01414     return reconstructions; 
01415 }
01416 
01419 Mat TransformationLearner::returnNeighbors(int targetIdx)const
01420 {
01421     int candidateIdx = targetIdx*nbTargetReconstructions;
01422     int neighborIdx;
01423     Mat neighbors = Mat(nbTargetReconstructions, inputSpaceDim);
01424     for(int i=0; i<nbTargetReconstructions; i++){
01425         neighborIdx = reconstructionSet[candidateIdx].neighborIdx;
01426         Vec neighbor;
01427         neighbor.resize(inputSpaceDim);
01428         seeTrainingPoint(neighborIdx, neighbor);
01429         neighbors(i) << neighbor;
01430         candidateIdx++;
01431     }
01432     return neighbors;
01433 }
01434 
01435 
01437 Mat TransformationLearner::returnTransform(int transformIdx)const
01438 {
01439     return transforms[transformIdx].copy();    
01440 }
01441 
01445 Mat TransformationLearner::returnAllTransforms()const
01446 {
01447     return transformsSet.copy();    
01448 }
01449 
01450 
01452 
01453 
01454 
01457 void TransformationLearner::seeTargetReconstructionSet(int targetIdx, 
01458                                                        TVec<ReconstructionCandidate> & targetReconstructionSet)const
01459 {
01460     int startIdx = targetIdx *nbTargetReconstructions;
01461     targetReconstructionSet = reconstructionSet.subVec(startIdx, 
01462                                                        nbTargetReconstructions); 
01463 }
01464 
01465 
01466 // stores the 'idx'th training data point into 'dst'
01467 void TransformationLearner::seeTrainingPoint(const int idx, Vec & dst)const
01468 {
01469     train_set->getExample(idx, dst,stp_v,stp_w);
01470 }
01471 
01472 
01474 
01476     
01477 
01480 real TransformationLearner::gamma_sample(real alpha, real beta)const
01481 {
01482   real c,x,u,d,v;
01483   c = 1.0/3.0;
01484   d = alpha - c ;
01485   do{
01486       x = random_gen->gaussian_01();
01487       u = random_gen->uniform_sample();    
01488       v = pow((1 + x/(pow(9*d , 0.5)))  ,3.0);
01489   }
01490   while(pl_log(u) >= 0.5*pow(x,2) + d - d*v + d*pl_log(v)); //repeat while the Marsaglia-Tsang acceptance test fails
01491   return d*v/beta;   
01492 }
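/* Editorial note (hedged): the sampler above follows the Marsaglia-Tsang
   method for Gamma(alpha, 1) variates, with d = alpha - 1/3,
   v = (1 + x/sqrt(9d))^3 and acceptance test
   log(u) < 0.5*x^2 + d - d*v + d*log(v); the do-while repeats while that test
   fails.  Dividing by beta rescales the accepted draw, and since
   dirichlet_sample() below calls gamma_sample(alpha) with a single argument,
   beta presumably defaults to 1 in the header. */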
01493 
01494 
01495 
01496 
01498 
01499 
01501     
01502 
01507 void TransformationLearner::dirichlet_sample(real alpha, Vec & sample)const{
01508     int d = sample.length();
01509     real sum = 0;
01510     for(int i=0;i<d;i++){
01511         sample[i]=gamma_sample(alpha);
01512         sum += sample[i];
01513     }
01514     for(int i=0;i<d;i++){
01515         sample[i]/=sum;
01516     }
01517 }
01518 
01519 Vec TransformationLearner::return_dirichlet_sample(real alpha)const
01520 {
01521     Vec sample ;
01522     sample.resize(inputSpaceDim);
01523     dirichlet_sample(alpha, sample);
01524     return sample;
01525 }
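/* Illustrative note (not part of the original source): normalizing
   independent Gamma(alpha) draws in this way yields a sample from a symmetric
   Dirichlet(alpha) distribution, i.e. a random probability vector.  A
   hypothetical use:

       Vec p = learner.return_dirichlet_sample(1.0); // alpha = 1: uniform over the simplex
       // p.length() == inputSpaceDim, each p[i] >= 0, sum(p) == 1 (up to rounding)

   Note that the components returned here are ordinary probabilities, not the
   log-domain weights used by the reconstruction set. */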
01526 
01527 
01528 
01529 /*void TransformationLearner::dirichlet_sample(const Vec & alphas,
01530                                         Vec & samples)
01531 {
01532     //TODO
01533 }
01534 Vec TransformationLearner::return_dirichlet_sample(Vec alphas)
01535 {
01536     //TODO
01537     return Vec();
01538 }
01539 */
01540 
01541 
01542 
01544 
01545 
01547 void TransformationLearner::normalizeTargetWeights(int targetIdx,
01548                                                    real totalWeight)
01549 {
01550     real w;
01551     int startIdx = targetIdx * nbTargetReconstructions;
01552     int endIdx = startIdx + nbTargetReconstructions;
01553     for(int candidateIdx =startIdx; candidateIdx<endIdx; candidateIdx++){
01554         w = reconstructionSet[candidateIdx].weight;
01555         reconstructionSet[candidateIdx].weight =  DIV_weights(w,totalWeight);
01556     }
01557 }
01558 
01560 real TransformationLearner::randomWeight()const
01561 {  
01562     real w = random_gen->uniform_sample();
01563     return INIT_weight((w + minimumProba)/(1.0 + minimumProba));
01564 }
01565 
01568 real TransformationLearner::INIT_weight(real initValue)const
01569 {
01570     return pl_log(initValue);
01571 }
01572 
01575 real TransformationLearner::PROBA_weight(real weight)const
01576 {
01577     return exp(weight); 
01578 }
01579 
01586 real TransformationLearner::DIV_weights(real numWeight,
01587                                         real denomWeight)const
01588 {
01589     return numWeight - denomWeight;
01590 }
01591 
01592 
01598 real TransformationLearner::MULT_INVERSE_weight(real weight)const
01599 {
01600     
01601     return -1*weight;
01602 }
01603 
01609 real TransformationLearner::MULT_weights(real weight1,real weight2)const
01610 {
01611     
01612     return weight1 + weight2 ;
01613 }
01614 
01619 real TransformationLearner::SUM_weights(real weight1,
01620                                         real weight2)const
01621 {
01622     
01623     return logadd(weight1,weight2);
01624 }
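/* Illustrative sketch (not part of the original source): weights are stored
   as log-probabilities, and the helpers above translate ordinary probability
   arithmetic into that log domain:

       real a = INIT_weight(0.2);    // stores log(0.2)
       real b = INIT_weight(0.3);    // stores log(0.3)
       real m = MULT_weights(a, b);  // log(0.2*0.3) = log(0.06)
       real s = SUM_weights(a, b);   // logadd(a,b)  = log(0.2+0.3) = log(0.5)
       real q = DIV_weights(a, s);   // log(0.2/0.5) = log(0.4)
       real p = PROBA_weight(q);     // back to the ordinary scale: 0.4

   Working in the log domain avoids numerical underflow when many small
   reconstruction probabilities are multiplied or normalized. */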
01625 
01626 
01627 
01630 real TransformationLearner::updateReconstructionWeight(int candidateIdx) 
01631 {
01632     int targetIdx = reconstructionSet[candidateIdx].targetIdx;
01633     int neighborIdx = reconstructionSet[candidateIdx].neighborIdx;
01634     int transformIdx = reconstructionSet[candidateIdx].transformIdx;
01635     
01636     real w = computeReconstructionWeight(targetIdx,
01637                                          neighborIdx,
01638                                          transformIdx);
01639     reconstructionSet[candidateIdx].weight = w;
01640     return w; 
01641 }
01642 
01644 real TransformationLearner::updateReconstructionWeight(int candidateIdx, 
01645                                                        const Vec & target,
01646                                                        const Vec & neighbor,
01647                                                        int transformIdx,
01648                                                        Vec & predictedTarget){
01649     
01650     real w = computeReconstructionWeight(target,
01651                                          neighbor,
01652                                          transformIdx,
01653                                          predictedTarget);
01654     reconstructionSet[candidateIdx].weight = w;
01655     return w;
01656 }
01657 
01658 
01659 real TransformationLearner::computeReconstructionWeight(const ReconstructionCandidate & gc)const
01660 {
01661     return computeReconstructionWeight(gc.targetIdx,
01662                                        gc.neighborIdx,
01663                                        gc.transformIdx);
01664 }
01665 real TransformationLearner::computeReconstructionWeight(int targetIdx,
01666                                                         int neighborIdx,
01667                                                         int transformIdx)const
01668 {
01669 
01670     Vec target(inputSpaceDim);
01671     seeTrainingPoint(targetIdx,target);
01672     return computeReconstructionWeight(target,
01673                                        neighborIdx,
01674                                        transformIdx);
01675 }
01676 real TransformationLearner::computeReconstructionWeight(const Vec & target,
01677                                                         int neighborIdx,
01678                                                         int transformIdx)const
01679 {
01680     Vec neighbor(inputSpaceDim);
01681     seeTrainingPoint(neighborIdx, neighbor);
01682     return computeReconstructionWeight(target,neighbor,transformIdx);
01683 }
01684 
01685 real TransformationLearner::computeReconstructionWeight(const Vec & target,
01686                                                         const Vec & neighbor,
01687                                                         int transformIdx )const
01688 {
01689     Vec predictedTarget(inputSpaceDim);
01690     return computeReconstructionWeight(target, neighbor, transformIdx,predictedTarget);
01691 }
01692 
01693 real TransformationLearner::computeReconstructionWeight(const Vec & target,
01694                                                         const Vec & neighbor,
01695                                                         int transformIdx,
01696                                                         Vec & predictedTarget)const
01697 {
01698     applyTransformationOn(transformIdx, neighbor, predictedTarget);
01699     real factor = -1/(2*noiseVariance);
01700     real w = factor*powdistance(target, predictedTarget);
01701     return MULT_weights(w, transformDistribution[transformIdx]);      
01702 }
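/* In probability terms, the weight computed above is an unnormalized
   log-posterior for the reconstruction candidate:

       weight = -||target - f_k(neighbor)||^2 / (2*noiseVariance)
                + transformDistribution[k]

   i.e. the log of an isotropic Gaussian reconstruction likelihood (without
   its normalization constant) plus the log prior of transformation k, since
   transformDistribution is already stored in the log domain and MULT_weights
   is an addition there. */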
01703 
01704 
01706 void TransformationLearner::applyTransformationOn(int transformIdx,
01707                                                  const Vec & src,
01708                                                  Vec & dst)const
01709 {
01710     if(transformFamily==TRANSFORM_FAMILY_LINEAR){
01711         Mat m  = transforms[transformIdx];
01712         //transposeProduct(dst,m,src); 
01713         product(dst,m,src);
01714         if(withBias){
01715             dst += biasSet(transformIdx);
01716         }
01717     }
01718     else{ //transformFamily == TRANSFORM_FAMILY_LINEAR_INCREMENT
01719         Mat m = transforms[transformIdx];
01720         //transposeProduct(dst,m,src);
01721         product(dst,m,src);
01722         dst += src;
01723         if(withBias){
01724             dst += biasSet(transformIdx);
01725         }
01726     }
01727 }
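/* The two supported families therefore reconstruct a point as

       TRANSFORM_FAMILY_LINEAR           : dst = T_k * src         (+ b_k)
       TRANSFORM_FAMILY_LINEAR_INCREMENT : dst = src + T_k * src   (+ b_k)

   where T_k is the square matrix transforms[k] and b_k the optional bias row
   biasSet(k). */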
01728 
01734 bool  TransformationLearner::isWellDefined(Vec & distribution)const
01735 {  
01736     if(nbTransforms != distribution.length()){
01737         return false;
01738     }
01739     real sum = 0;
01740     real proba;
01741     for(int i=0; i<nbTransforms;i++){
01742         proba = PROBA_weight(distribution[i]);
01743         if(proba < 0 || proba >1){
01744             return false;
01745         }
01746         sum += proba;
01747     }
01748     return fabs(sum - 1) < 1e-6; //tolerate rounding error in the normalization
01749 }
01750 
01751 
01753 
01755 void TransformationLearner::initEStep(){
01756     if(initializationMode == INIT_MODE_DEFAULT){
01757         initEStepA();
01758     }
01759     else
01760         initEStepB();
01761 }
01762 
01765 //1)for each target,
01766 //  a)find the neighbors
01767 //  b)for each neighbor, consider all the possible transformations
01768 //2)compute the weights of all the reconstruction candidates using 
01769 //  the current value of the parameters to learn 
01770 void TransformationLearner::initEStepB(){
01771     initEStepA();
01772     smallEStep();    
01773 }
01774 
01775 
01777 //for each target:
01778 //1)find the neighbors (we use Euclidean distance as a heuristic)
01779 //2)for each neighbor, assign a random weight to each possible transformation
01780 void TransformationLearner::initEStepA()
01781 {
01782    
01783     priority_queue< pair< real,int > > pq = priority_queue< pair< real,int > >();
01784     
01785     real totalWeight;
01786     int candidateIdx=0,targetStartIdx, neighborIdx;
01787     
01788     //for each point in the training set i.e. for each target point,
01789     for(int targetIdx = 0; targetIdx < trainingSetLength ;targetIdx++){
01790         
01791         //finds the nearest neighbors and keeps them in a priority queue
01792         findNearestNeighbors(targetIdx, pq);
01793         
01794         //expands those neighbors in the dataset:
01795         //(i.e. for each neighbor, creates one entry per transformation and
01796         //assigns it a positive random weight)
01797         
01798         totalWeight = INIT_weight(0);
01799         targetStartIdx = candidateIdx;
01800         for(int k = 0; k < nbNeighbors; k++){
01801             neighborIdx = pq.top().second;
01802             pq.pop();
01803             totalWeight =
01804                 SUM_weights(totalWeight,
01805                             expandTargetNeighborPairInReconstructionSet(targetIdx, 
01806                                                                         neighborIdx,
01807                                                                         candidateIdx));
01808             candidateIdx += nbTransforms;
01809         }
01810         //normalizes the  weights of all the entries created for the target 
01811         //point
01812         normalizeTargetWeights(targetIdx,totalWeight);
01813     }
01814 
01815 }
01816 
01817 
01822 real TransformationLearner::expandTargetNeighborPairInReconstructionSet(int targetIdx,
01823                                                                         int neighborIdx,
01824                                                                         int candidateStartIdx)
01825 {
01826     int candidateIdx = candidateStartIdx;
01827     real weight, totalWeight = INIT_weight(0);  
01828     for(int transformIdx=0; transformIdx<nbTransforms; transformIdx ++){
01829        
01830         weight = randomWeight(); 
01831         totalWeight = SUM_weights(totalWeight,weight);
01832         reconstructionSet[candidateIdx] = ReconstructionCandidate(targetIdx, 
01833                                                                   neighborIdx,
01834                                                                   transformIdx,
01835                                                                   weight);
01836     
01837         candidateIdx ++;
01838     }
01839     return totalWeight;    
01840 }
01841 
01842 
01846 void TransformationLearner::findNearestNeighbors(int targetIdx,
01847                                                  priority_queue< pair< real, int > > & pq)
01848 {
01849     
01850     //we want an empty queue
01851     PLASSERT(pq.empty()); 
01852   
01853     //fetch the target point, given its index in the training set
01854     seeTrainingPoint(targetIdx, fnn_target);
01855     
01856     //for each potential neighbor,
01857     real dist;    
01858     for(int i=0; i<trainingSetLength; i++){
01859         if(i != targetIdx){ //(the target cannot be its own neighbor)
01860             //computes the distance to the target
01861             seeTrainingPoint(i, fnn_neighbor);
01862             dist = powdistance(fnn_target, fnn_neighbor); 
01863             //if the distance is among the "nbNeighbors" smallest distances seen,
01864             //keep the point until a closer neighbor is found.
01865             if(int(pq.size()) < nbNeighbors){
01866                 pq.push(pair<real,int>(dist,i));
01867             }
01868             else if (dist < pq.top().first){
01869                 pq.pop();
01870                 pq.push(pair<real,int>(dist,i));
01871             }
01872             else if(dist == pq.top().first){
01873                 if(random_gen->uniform_sample() >0.5){
01874                     pq.pop();
01875                     pq.push(pair<real,int>(dist,i));
01876                 }
01877             }
01878         }
01879     }    
01880 }
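/* Implementation note: the priority queue is a max-heap on the distance, so
   pq.top() is always the FARTHEST of the (at most nbNeighbors) candidates
   kept so far.  Popping it whenever a strictly closer point is found, and
   breaking exact ties at random, leaves precisely the nbNeighbors nearest
   neighbors in the queue when the loop ends. */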
01881 
01883 
01886 void TransformationLearner::EStep()
01887 {
01888     if(largeEStepAPeriod > 0  && stage % largeEStepAPeriod == largeEStepAOffset){
01889         largeEStepA();
01890     }
01891     if(largeEStepBPeriod>0 && stage % largeEStepBPeriod == largeEStepBOffset){
01892         largeEStepB();
01893     }
01894     smallEStep(); 
01895 }
01896 
01897 
01899 
01903 void TransformationLearner::largeEStepA()
01904 {
01905     priority_queue< ReconstructionCandidate > pq =  
01906         priority_queue< ReconstructionCandidate >();
01907     real totalWeight= INIT_weight(0);
01908     int candidateIdx=0;
01909     
01910     //for each point in the training set i.e. for each target point,
01911     for(int targetIdx = 0; targetIdx < trainingSetLength ; targetIdx++){
01912         
01913         //finds the best weighted triples and keeps them in a priority queue
01914         findBestTargetReconstructionCandidates(targetIdx, pq);
01915         //store those triples in the dataset:
01916         totalWeight = INIT_weight(0);
01917         for(int k=0; k < nbTargetReconstructions; k++){
01918             reconstructionSet[candidateIdx] = pq.top(); 
01919             totalWeight = SUM_weights(pq.top().weight, totalWeight);
01920             pq.pop();         
01921             candidateIdx ++;
01922         }
01923         
01924         //normalizes the  weights of all the entries created for the 
01925         //target point;
01926         normalizeTargetWeights(targetIdx,totalWeight);
01927     } 
01928 }
01929 
01930 
01935 void TransformationLearner::findBestTargetReconstructionCandidates(int targetIdx,
01936                                                                    priority_queue< ReconstructionCandidate > & pq)
01937 {
01938     //we want an empty queue
01939     PLASSERT(pq.empty()); 
01940     
01941     real weight;
01942     seeTrainingPoint(targetIdx, fbtrc_target);
01943     //for each potential neighbor
01944     for(int neighborIdx=0; neighborIdx<trainingSetLength; neighborIdx++){
01945         if(neighborIdx != targetIdx){
01946             seeTrainingPoint(neighborIdx, fbtrc_neighbor);
01947             for(int transformIdx=0; transformIdx<nbTransforms; transformIdx++){
01948                 weight = computeReconstructionWeight(fbtrc_target, 
01949                                                      fbtrc_neighbor, 
01950                                                      transformIdx,
01951                                                      fbtrc_predictedTarget);
01952                 
01953                 //if the weight is among the "nbTargetReconstructions" biggest weights seen,
01954                 //keep the candidate until a better one is found.
01955                 if(int(pq.size()) < nbTargetReconstructions){
01956                     pq.push(ReconstructionCandidate(targetIdx,
01957                                                     neighborIdx,
01958                                                     transformIdx,
01959                                                     weight));  
01960                 }
01961                 else if (weight > pq.top().weight){ 
01962                     pq.pop();
01963                     pq.push(ReconstructionCandidate(targetIdx,
01964                                                     neighborIdx,
01965                                                     transformIdx,
01966                                                     weight));
01967                 }
01968                 else if (weight == pq.top().weight){
01969                     if(random_gen->uniform_sample()>0.5){
01970                         pq.pop();
01971                         pq.push(ReconstructionCandidate(targetIdx,
01972                                                         neighborIdx,
01973                                                         transformIdx,
01974                                                         weight));
01975                     }
01976                 }
01977             }
01978         }     
01979     }
01980 }
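/* Implementation note (assumption about the header): for this pruning to keep
   the candidates with the LARGEST weights, pq.top() must expose the smallest
   weight currently retained, so ReconstructionCandidate's operator< is
   presumably defined with the comparison on weights reversed, which turns
   std::priority_queue into a min-heap over the weight. */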
01981 
01982 
01983 
01985 
01986 
01990 void TransformationLearner::largeEStepB()
01991 {
01992     priority_queue< ReconstructionCandidate > pq;
01993     
01994     real totalWeight , weight;
01995     int candidateIdx=0 ;
01996     
01997     //for each point in the training set i.e. for each target point,
01998     for(int targetIdx =0; targetIdx<trainingSetLength ;targetIdx++){
01999         
02000         totalWeight = INIT_weight(0);
02001         for(int transformIdx=0; transformIdx < nbTransforms; transformIdx ++){
02002             //finds the best weighted triples and keeps them in a priority queue
02003             findBestWeightedNeighbors(targetIdx,transformIdx, pq);
02004             //store those neighbors in the dataset
02005             for(int k=0; k<nbNeighbors; k++){
02006                 reconstructionSet[candidateIdx] = pq.top();
02007                 weight = pq.top().weight;
02008                 totalWeight = SUM_weights( weight, totalWeight);
02009                 pq.pop();
02010                 candidateIdx ++;
02011             }
02012         }
02013       //normalizes the  weights of all the entries created for the target 
02014       //point;
02015         normalizeTargetWeights(targetIdx,totalWeight);
02016     }
02017 }
02018    
02019 
02023 void TransformationLearner::findBestWeightedNeighbors(int targetIdx,
02024                                                       int transformIdx,
02025                                                       priority_queue< ReconstructionCandidate > & pq)
02026 {
02027     //we want an empty queue
02028     PLASSERT(pq.empty()); 
02029     
02030     real weight; 
02031     seeTrainingPoint(targetIdx, fbwn_target);
02032     //for each potential neighbor
02033     for(int neighborIdx=0; neighborIdx<trainingSetLength; neighborIdx++){
02034         if(neighborIdx != targetIdx){ //(the target cannot be its own neighbor)
02035             seeTrainingPoint(neighborIdx, fbwn_neighbor);
02036             weight = computeReconstructionWeight(fbwn_target, 
02037                                                  fbwn_neighbor, 
02038                                                  transformIdx,
02039                                                  fbwn_predictedTarget);
02040             //if the weight of the triple is among the "nbNeighbors" biggest
02041             //seen, keep it until a bigger weight is found.
02042             if(int(pq.size()) < nbNeighbors){
02043                 pq.push(ReconstructionCandidate(targetIdx,
02044                                                 neighborIdx, 
02045                                                 transformIdx,
02046                                                 weight));
02047             }
02048             else if (weight > pq.top().weight){
02049                 pq.pop();
02050                 pq.push(ReconstructionCandidate(targetIdx,
02051                                                 neighborIdx,
02052                                                 transformIdx,
02053                                                 weight));
02054             }
02055             else if (weight == pq.top().weight){
02056                 if(random_gen->uniform_sample() > 0.5){
02057                     pq.pop();
02058                     pq.push(ReconstructionCandidate(targetIdx,
02059                                                     neighborIdx,
02060                                                     transformIdx,
02061                                                     weight));
02062                 }
02063             }
02064         }
02065     }   
02066 }
02067 
02068 
02069 
02071 
02072 
02074 void TransformationLearner::smallEStep()
02075 {
02076     int candidateIdx =0;
02077     int  targetIdx = reconstructionSet[candidateIdx].targetIdx;
02078     real totalWeight = INIT_weight(0);
02079     seeTrainingPoint(targetIdx,ses_target);
02080     
02081     while(candidateIdx < nbReconstructions){
02082         
02083         seeTrainingPoint(reconstructionSet[candidateIdx].neighborIdx, ses_neighbor);
02084         totalWeight = SUM_weights(totalWeight,
02085                                   updateReconstructionWeight(candidateIdx,
02086                                                              ses_target,
02087                                                              ses_neighbor,
02088                                                              reconstructionSet[candidateIdx].transformIdx,
02089                                                              ses_predictedTarget));
02090         candidateIdx ++;
02091     
02092         if(candidateIdx == nbReconstructions)
02093             normalizeTargetWeights(targetIdx,totalWeight);
02094         else if(targetIdx != reconstructionSet[candidateIdx].targetIdx){
02095             normalizeTargetWeights(targetIdx, totalWeight);
02096             totalWeight = INIT_weight(0);
02097             targetIdx = reconstructionSet[candidateIdx].targetIdx;
02098             seeTrainingPoint(targetIdx, ses_target);
02099         }
02100     }    
02101 }
02102 
02103 // M STEP
02104 
02105 
02108 void TransformationLearner::MStep()
02109 {
02110     if(noiseVariancePeriod > 0 && stage%noiseVariancePeriod == noiseVarianceOffset)
02111         MStepNoiseVariance();
02112     if(transformDistributionPeriod > 0 && 
02113        stage % transformDistributionPeriod == transformDistributionOffset)
02114         MStepTransformDistribution();
02115     if(biasPeriod > 0 && stage % biasPeriod == biasOffset)
02116         MStepBias();
02117     if(stage % transformsPeriod == transformsOffset){
02118         if(emphasisOnDiversity){
02119             int t  = ((stage - transformsOffset)/transformsPeriod) % nbTransforms;
02120             MStepTransformationDiv(t);
02121         }
02122         else{
02123             MStepTransformations();
02124         }    
02125     }
02126 }
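/* Scheduling example (illustrative): each group of parameters is refreshed
   only when the current stage matches its period/offset pair.  For instance,
   with noiseVariancePeriod = 4 and noiseVarianceOffset = 1, the noise
   variance would be re-estimated at stages 1, 5, 9, ...; the noise variance,
   transform distribution and bias are skipped entirely when their period is
   not positive. */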
02127 
02130 void TransformationLearner::MStepTransformDistribution()
02131 {
02132     MStepTransformDistributionMAP(transformDistributionAlpha);
02133 }
02134 
02139 void TransformationLearner::MStepTransformDistributionMAP(real alpha)
02140 {
02141     newDistribution.fill(INIT_weight(0));
02142         
02143     int transformIdx;
02144     real weight;
02145     for(int idx =0 ;idx < nbReconstructions ; idx ++){
02146         transformIdx = reconstructionSet[idx].transformIdx;
02147         weight = reconstructionSet[idx].weight;
02148         newDistribution[transformIdx] = 
02149             SUM_weights(newDistribution[transformIdx],
02150                         weight);
02151     }
02152 
02153     real addFactor = INIT_weight(alpha - 1);
02154     real divisionFactor = INIT_weight(nbTransforms*(alpha - 1) + trainingSetLength); 
02155 
02156     for(int k=0; k<nbTransforms ; k++){
02157         newDistribution[k]= DIV_weights(SUM_weights(addFactor,
02158                                                     newDistribution[k]),
02159                                         divisionFactor);
02160     }
02161     transformDistribution << newDistribution ;
02162 }
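/* The update above is the usual MAP estimate for a multinomial distribution
   under a symmetric Dirichlet(alpha) prior, carried out in the log domain:

       p(k) = ( sum of posterior weights assigned to transformation k + alpha - 1 )
              / ( trainingSetLength + nbTransforms*(alpha - 1) )

   With alpha = 1 (no regularization) this reduces to the empirical frequency
   of transformation k among the weighted reconstruction candidates. */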
02163 
02166 /*-Notation: we will use the symbol _T to indicate the transposition operation
02167   -To better understand how the algorithm works,
02168    see the NOTE (in comments) placed right after the method.
02169    (The method is called often and has to be efficient. Some details
02170    of the implementation might be a bit unclear for this reason.) */
02171 void TransformationLearner::MStepTransformations()
02172 {
02173     
02174     //set the m dXd matrices Ck and Bk , k in{1, ...,m} to 0.
02175     B_C.clear();
02176     
02177     real lambda = 1.0*noiseVariance/transformsVariance;
02178     for(int idx=0 ; idx<nbReconstructions ; idx++){
02179         
02180         //take a view of the next entry of our dataset, that is, a triple:
02181         //(target_idx, neighbor_idx, transformation_idx)
02182         
02183         real p = PROBA_weight(reconstructionSet[idx].weight);
02184   
02185         //fetch the target and neighbor points from the training set
02186         
02187         seeTrainingPoint(reconstructionSet[idx].targetIdx, mst_target);
02188         seeTrainingPoint(reconstructionSet[idx].neighborIdx, mst_neighbor);
02189         
02190         int t = reconstructionSet[idx].transformIdx;
02191         
02192         mst_v << mst_target;
02193         if(transformFamily == TRANSFORM_FAMILY_LINEAR_INCREMENT){
02194             mst_v = mst_v - mst_neighbor;
02195         }
02196         if(withBias){
02197             mst_v = mst_v - biasSet(t);
02198         }
02199         //at the end, we want matrix C[t] to represent
02200         //the matrix ( (NeighborPart(t)_T)W(NeighborPart(t)) + lambdaI ), transposed.
02201         externalProductScaleAcc(C[t], mst_neighbor, mst_neighbor, p);
02202         
02203         //at the end, we want matrix B[t] to represent
02204         //the matrix (NeighborPart(t)_T)W(TargetPart(t)), transposed.
02205         //externalProductScaleAcc(B[t], neighbor, v,p);
02206         externalProductScaleAcc(B[t],mst_v,mst_neighbor,p);
02207     }
02208     
02209  
02210     for(int t=0; t<nbTransforms; t++){
02211         addToDiagonal(C[t],lambda);
02212         //transforms[t] << solveLinearSystem(C[t], B[t]);  
02213         lapackSolveLinearSystem(C[t],B[t],mst_pivots);
02214         transforms[t] << B[t];
02215         
02216     }  
02217 }
02218 /*NOTE : MStepTransformations() 
02219  -Notation: we will use the symbol _T to indicate the transposition operation
02220  -The algorithm consists of solving a linear system:
02221       for each t, we want to find
02222                  transforms(t)=X ,
02223                  with X such that : E(t)X_T=D(t)
02224                  Here, 
02225                  E(t) = (NeighborPart(t)_T)W(NeighborPart(t)) + lambda(I)
02226                  D(t) = (NeighborPart(t)_T)W(TargetPart(t))
02227  -We will compute E(t)_T = C(t) , and D(t)_T =B(t)
02228   in the algorithm. It is necessary to compute directly those transposed 
02229   versions of E(t) and D(t) to solve the linear system with efficiency. 
02230  -once the computations of C(t) and B(t) are done,
02231   we use a method from the plapack package to solve our linear system
02232     lapackSolveLinearSystem(A_T,B_T, pivots):
02233     Here is a copy of the description of the method: 
02234  -------------------------------------------------------------------------------------------------------
02235    Solves AX = B
02236   This is a simple wrapper over the lapack routine. It expects At and Bt (transposes of A and B) as input, 
02237   as well as storage for resulting pivots vector of ints of same length as A.
02238   The call overwrites Bt, putting the transposed solution Xt in there,
02239   and At is also overwritten to contain the factors L and U from the factorization A = P*L*U; 
02240   (the unit diagonal elements of L  are  not stored).
02241   The lapack status is returned:
02242   = 0:  successful exit
02243   < 0:  if INFO = -i, the i-th argument had an illegal value
02244   > 0:  if INFO = i, U(i,i) is  exactly  zero.   The factorization has been completed, 
02245   but the factor U is exactly singular, so the solution could not be computed.
02246 --------------------------------------------------------------------------------------------------------
02247 -As you can see, we have to pass the transposed versions of matrices
02248  E(t) and D(t) to the procedure, that is, matrices C(t) and B(t)
02249  -The matrix transforms(t)=X will be stored in B(t) at the end of the algorithm
02250 */
02251 
02252  
02253 
02259 void TransformationLearner::MStepTransformationDiv(int transformIdx){
02260     //set the m dXd matrices Ck and Bk , k in{1, ...,m} to 0.
02261     mstd_B.clear();
02262     mstd_C.clear();
02263     mstd_D.clear();
02264     
02265     for(int t=0; t<nbTransforms ; t++){
02266         if(t != transformIdx){
02267             mstd_D += transforms[t];
02268         }
02269     }
02270     mstd_D *= -2*diversityFactor*noiseVariance;
02271    
02272 
02273     //real lambda = noiseVariance*(1.0/transformsVariance -2*(nbTransforms - 1)*diversityFactor);
02274     real lambda = noiseVariance/transformsVariance ;
02275     
02276     for(int idx=0 ; idx<nbReconstructions ; idx++){
02277         
02278         //take a view of the next entry of our dataset, that is, a triple:
02279         //(target_idx, neighbor_idx, transformation_idx)
02280         
02281         real p = PROBA_weight(reconstructionSet[idx].weight);
02282   
02283         //fetch the target and neighbor points from the training set
02284         
02285         seeTrainingPoint(reconstructionSet[idx].targetIdx, mstd_target);
02286         seeTrainingPoint(reconstructionSet[idx].neighborIdx, mstd_neighbor);
02287         
02288         if( reconstructionSet[idx].transformIdx == transformIdx){
02289             mstd_v << mstd_target;
02290             if(transformFamily == TRANSFORM_FAMILY_LINEAR_INCREMENT){
02291                 mstd_v = mstd_v - mstd_neighbor;
02292             }
02293      
02294             //at the end, we want matrix mstd_C to represent
02295             //the matrix ( (NeighborPart(t)_T)W(NeighborPart(t)) + lambdaI ), transposed.
02296             externalProductScaleAcc(mstd_C, mstd_neighbor, mstd_neighbor, p);
02297             
02298             //at the end, we want matrix mstd_B to represent
02299             //the matrix (NeighborPart(t)_T)W(TargetPart(t)), transposed.
02300             //externalProductScaleAcc(B[t], neighbor, v,p);
02301             externalProductScaleAcc(mstd_B,mstd_v,mstd_neighbor,p);
02302         }
02303         
02304     }
02305 
02306     addToDiagonal(mstd_C,lambda);
02307     //transforms[t] << solveLinearSystem(C[t], B[t]); 
02308     mstd_B += mstd_D;
02309     lapackSolveLinearSystem(mstd_C,mstd_B, mstd_pivots);
02310     transforms[transformIdx] << mstd_B;
02311     
02312 }
02313 
02314 
02315 
02316 
02317 
02318 
02319 
02322 void TransformationLearner::MStepBias(){
02323     msb_newBiasSet.fill(0);
02324     msb_norms.fill(INIT_weight(0));
02325     int transformIdx;
02326     real proba,weight;
02327     for(int idx=0; idx<nbReconstructions; idx++){
02328         transformIdx = reconstructionSet[idx].transformIdx;
02329         weight = reconstructionSet[idx].weight;
02330         proba = PROBA_weight(weight);
02331         seeTrainingPoint(reconstructionSet[idx].targetIdx,msb_target);
02332         seeTrainingPoint(reconstructionSet[idx].neighborIdx, msb_neighbor);
02333         applyTransformationOn(transformIdx,msb_neighbor, msb_reconstruction);
02334         msb_newBiasSet(transformIdx) += proba*(msb_target - msb_reconstruction);
02335         msb_norms[transformIdx] = SUM_weights(msb_norms[transformIdx],weight);
02336     }
02337     for(int t=0; t<nbTransforms ; t++){
02338         msb_newBiasSet(t) /= ((noiseVariance/transformsVariance) 
02339                               +
02340                               PROBA_weight(msb_norms[t]));
02341     }
02342     biasSet << msb_newBiasSet;   
02343 }
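/* The bias update above is a regularized weighted average of the residuals:

       b_k = sum_i p_i * (target_i - reconstruction_i)
             / ( noiseVariance/transformsVariance + sum_i p_i )

   where the sums run over the reconstruction candidates that use
   transformation k, p_i is the candidate's posterior weight, and
   reconstruction_i is the candidate's reconstruction under the current
   parameters (including the current bias). */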
02344 
02345 
02347 void TransformationLearner::MStepNoiseVariance()
02348 {
02349     MStepNoiseVarianceMAP(noiseAlpha,noiseBeta);
02350 }
02351 
02355 void TransformationLearner::MStepNoiseVarianceMAP(real alpha, real beta)
02356 {
02357     
02358     msnvMAP_total_k.fill(0);
02359     int transformIdx;
02360     real proba;
02361     int candidateIdx=0;
02362     for(int targetIdx=0; targetIdx<trainingSetLength; targetIdx ++){
02363         seeTrainingPoint(targetIdx,msnvMAP_target);
02364         for(int idx=0; idx < nbTargetReconstructions; idx++){
02365             transformIdx = reconstructionSet[candidateIdx].transformIdx;
02366             seeTrainingPoint(reconstructionSet[candidateIdx].neighborIdx , msnvMAP_neighbor);
02367             proba = PROBA_weight(reconstructionSet[candidateIdx].weight);
02368             msnvMAP_total_k[transformIdx]+=(proba * reconstructionEuclideanDistance(msnvMAP_target,
02369                                                                                     msnvMAP_neighbor,
02370                                                                                     transformIdx,
02371                                                                                     msnvMAP_reconstruction));
02372             candidateIdx ++;
02373         }
02374     }
02375     noiseVariance = (2*beta + sum(msnvMAP_total_k))/(2*alpha - 2 + trainingSetLength*inputSpaceDim);  
02376         
02377 }
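/* The closed-form update above is

       noiseVariance = ( 2*beta + sum_i p_i * ||target_i - reconstruction_i||^2 )
                       / ( 2*alpha - 2 + trainingSetLength * inputSpaceDim )

   which is consistent with the posterior mode of the noise precision under a
   Gamma(alpha, beta) prior, inverted back to a variance; with alpha = 1 and
   beta = 0 it is simply the weighted mean squared reconstruction error per
   coordinate. */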
02378  
02381 real TransformationLearner::reconstructionEuclideanDistance(int candidateIdx){
02382     Vec target(inputSpaceDim);
02383     seeTrainingPoint(reconstructionSet[candidateIdx].targetIdx, target);
02384     Vec neighbor(inputSpaceDim);
02385     seeTrainingPoint(reconstructionSet[candidateIdx].neighborIdx,
02386                      neighbor);
02387     Vec reconstruction(inputSpaceDim);
02388     applyTransformationOn(reconstructionSet[candidateIdx].transformIdx,
02389                           neighbor,
02390                           reconstruction);
02391     return powdistance(target, reconstruction);
02392 }
02393 
02394 real TransformationLearner::reconstructionEuclideanDistance(const Vec& target,
02395                                                             const Vec& neighbor,
02396                                                             int transformIdx,
02397                                                             Vec& reconstruction)const
02398 {
02399     applyTransformationOn(transformIdx,
02400                           neighbor,
02401                           reconstruction);
02402     return powdistance(target,reconstruction);
02403 
02404 }
02405 
02406 
02407 
02409 void TransformationLearner::nextStage(){
02410     stage ++;
02411 }
02412 
02413 
02414 } // end of namespace PLearn
02415 
02416 
02417 /*
02418   Local Variables:
02419   mode:c++
02420   c-basic-offset:4
02421   c-file-style:"stroustrup"
02422   c-file-offsets:((innamespace . 0)(inline-open . 0))
02423   indent-tabs-mode:nil
02424   fill-column:79
02425   End:
02426 */
02427 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :