GaussMix.cc
00001 // -*- C++ -*-
00002 
00003 // GaussMix.cc
00004 //
00005 // Copyright (C) 2003 Julien Keable
00006 // Copyright (C) 2004-2006 University of Montreal
00007 //
00008 // Redistribution and use in source and binary forms, with or without
00009 // modification, are permitted provided that the following conditions are met:
00010 //
00011 //  1. Redistributions of source code must retain the above copyright
00012 //     notice, this list of conditions and the following disclaimer.
00013 //
00014 //  2. Redistributions in binary form must reproduce the above copyright
00015 //     notice, this list of conditions and the following disclaimer in the
00016 //     documentation and/or other materials provided with the distribution.
00017 //
00018 //  3. The name of the authors may not be used to endorse or promote
00019 //     products derived from this software without specific prior written
00020 //     permission.
00021 //
00022 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00023 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00024 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00025 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00026 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00027 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00028 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00029 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00030 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00031 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00032 //
00033 // This file is part of the PLearn library. For more information on the PLearn
00034 // library, go to the PLearn Web site at www.plearn.org
00035 
00036 /* *******************************************************
00037  * $Id: GaussMix.cc 10366 2011-05-26 18:41:49Z plearner $
00038  ******************************************************* */
00039 
00041 #include "GaussMix.h"
00042 
00043 #include <limits>
00044 #include <boost/graph/adjacency_list.hpp>
00045 #include <boost/graph/prim_minimum_spanning_tree.hpp>
00046 
00047 #include <plearn/io/load_and_save.h>
00048 #include <plearn/math/Cholesky_utils.h>
00049 #include <plearn/math/pl_erf.h>   
00050 #include <plearn/math/plapack.h>
00051 #include <plearn/vmat/ConcatColumnsVMatrix.h>
00052 //#include <plearn/sys/Profiler.h>
00053 #include <plearn/vmat/FileVMatrix.h>
00054 #include <plearn/vmat/MemoryVMatrix.h>
00055 #include <plearn/vmat/ReorderByMissingVMatrix.h>
00056 #include <plearn/vmat/SubVMatrix.h>
00057 #include <plearn/vmat/VMat_basic_stats.h>
00058 #if 0
00059 #include <plearn/vmat/SortRowsVMatrix.h>
00060 #endif
00061 
00062 namespace PLearn {
00063 using namespace std;
00064 
00065 #define TYPE_UNKNOWN    0
00066 #define TYPE_SPHERICAL  1
00067 #define TYPE_DIAGONAL   2
00068 #define TYPE_GENERAL    3
00069 //#define DIRECTED_HACK
00070 
00072 // GaussMix //
00074 GaussMix::GaussMix():
00075     ptimer(new PTimer()),
00076     type_id(TYPE_UNKNOWN),
00077     previous_predictor_part_had_missing(false),
00078     D(-1),
00079     n_eigen_computed(-1),
00080     nsamples(-1),
00081     alpha_min(1e-6),
00082     efficient_k_median(1),
00083     efficient_k_median_iter(100),
00084     efficient_missing(0),
00085     epsilon(1e-6),
00086     f_eigen(0),
00087     impute_missing(false),
00088     kmeans_iterations(5),
00089     L(1),
00090     max_samples_in_cluster(-1),
00091     min_samples_in_cluster(1),
00092     n_eigen(-1),
00093     sigma_min(1e-6),
00094     type("spherical")
00095 {
00096     // Change the default value of 'nstages' to 10 to make the user aware that
00097     // in general it should be higher than 1.
00098     nstages = 10;
00099     current_training_sample = -1;
00100     previous_training_sample = -2; // Only use efficient_missing in training.
00101     ptimer->newTimer("init_time");
00102     ptimer->newTimer("training_time");
00103 }
00104 
00105 PLEARN_IMPLEMENT_OBJECT(GaussMix,
00106     "Gaussian mixture, either set non-parametrically or trained by EM.",
00107     "GaussMix implements a mixture of L Gaussians.\n"
00108     "There are 3 possible parameterization types:\n"
00109     " - spherical : Gaussians have covariance = diag(sigma^2).\n"
00110     "               Parameter used : sigma.\n"
00111     " - diagonal  : Gaussians have covariance = diag(sigma_1^2...sigma_d^2).\n"
00112     "               Parameter used : diags.\n"
00113     " - general   : Gaussians have an unconstrained covariance matrix.\n"
00114     "               The user specifies the number 'n_eigen' of eigenvectors\n"
00115     "               kept when performing the eigen-decomposition of the\n"
00116     "               covariance matrix. The remaining eigenvectors are\n"
00117     "               considered as having a fixed eigenvalue equal to the\n"
00118     "               next highest eigenvalue in the decomposition.\n"
00119     "\n"
00120     "Some parameters are common to all 3 types :\n"
00121     " - alpha   : the weight of the Gaussians (= P(j)).\n"
00122     " - center  : the mean of the Gaussians\n"
00123     "\n"
00124     "If a GaussMix is not meant to be trained, its stage should be set to a\n"
00125     "strictly positive value, in order to indicate that it is ready to be\n"
00126     "used (of course, this means all parameters are properly set).\n"
00127     /* TODO Keep this cost? How?
00128     "\n"
00129     "In addition to the usual costs inherited from PDistribution, an additional output\n"
00130     "can be computed by using the character 'p' in the 'outputs_def' option: this will\n"
00131     "return an output containing the posterior log-probabilities P(j|Y,X) of each Gaussian.\n"
00132     */
00133 );
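// The density implemented by the mixture, written out for the 'spherical'
// type as an illustrative sketch (this is not code used by the class):
// 'alpha', 'center' and 'sigma' play the same roles as the options declared
// below, and 'logadd' / 'gauss_log_density_stddev' are the PLearn helpers
// also used further down in this file.
#if 0
real spherical_mixture_log_density(const Vec& y, const Vec& alpha,
                                   const Mat& center, const Vec& sigma)
{
    real log_p = -REAL_MAX; // log(0): no component accumulated yet.
    for (int j = 0; j < alpha.length(); j++) {
        // log( alpha_j * N(y; center_j, sigma_j^2 * I) )
        real log_p_j = pl_log(alpha[j]);
        for (int k = 0; k < y.length(); k++)
            log_p_j += gauss_log_density_stddev(y[k], center(j, k), sigma[j]);
        log_p = logadd(log_p, log_p_j);
    }
    return log_p;
}
#endif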
00134 
00136 // declareOptions //
00138 void GaussMix::declareOptions(OptionList& ol)
00139 {
00140     // Build options.
00141 
00142     declareOption(ol, "L", &GaussMix::L, OptionBase::buildoption,
00143         "Number of Gaussians in the mixture.");
00144 
00145     declareOption(ol, "type", &GaussMix::type, OptionBase::buildoption,
00146         "This is the type of covariance matrix for each Gaussian:\n"
00147         "   - spherical : spherical covariance matrix sigma^2 * I\n"
00148         "   - diagonal  : diagonal covariance matrix, given by standard\n"
00149         "                 deviations 'diags'\n"
00150         "   - general   : unconstrained covariance matrix (defined by its\n"
00151         "                 eigenvectors)\n");
00152 
00153     declareOption(ol, "n_eigen", &GaussMix::n_eigen, OptionBase::buildoption,
00154         "If type is 'general', the number of eigenvectors used to compute\n"
00155         "the covariance matrix. The remaining eigenvectors will be given an\n"
00156         "eigenvalue equal to the next highest eigenvalue. If set to -1, all\n"
00157         "eigenvectors will be kept.");
00158 
00159     declareOption(ol, "f_eigen", &GaussMix::f_eigen, OptionBase::buildoption,
00160         "If equal to 0, this option is ignored. Otherwise, it is the\n"
00161         "fraction of eigenvectors that are kept (this value overrides\n"
00162         "any setting of the 'n_eigen' option).");
00163 
00164     declareOption(ol, "efficient_missing", &GaussMix::efficient_missing,
00165                                            OptionBase::buildoption,
00166         "If not 0, computations with missing values will be more efficient:\n"
00167         "- 1: most efficient method\n"
00168         "- 2: less naive method than 0, where we compute the matrices\n"
00169         "     only once per missing pattern (not as good as 1)\n"
00170         "- 3: same as 1, but using inverse variance lemma instead of\n"
00171         "     Cholesky (could be more efficient after all)");
00172 
00173     declareOption(ol, "efficient_k_median", &GaussMix::efficient_k_median,
00174                                             OptionBase::buildoption,
00175         "Starting number of clusters used.");
00176 
00177     declareOption(ol, "max_samples_in_cluster",
00178                   &GaussMix::max_samples_in_cluster,
00179                   OptionBase::buildoption,
00180         "Maximum number of samples allowed in each cluster (ignored if -1).\n"
00181         "More than 'efficient_k_median' clusters may be used in order to\n"
00182         "comply with this constraint.");
00183 
00184     declareOption(ol, "min_samples_in_cluster",
00185                   &GaussMix::min_samples_in_cluster,
00186                   OptionBase::buildoption,
00187         "Minimum number of samples allowed in each cluster.\n"
00188         "Fewer than 'efficient_k_median' clusters may be used in order to\n"
00189         "comply with this constraint.");
00190 
00191     declareOption(ol, "efficient_k_median_iter",
00192                                             &GaussMix::efficient_k_median_iter,
00193                                             OptionBase::buildoption,
00194         "Maximum number of iterations in k-median.");
00195 
00196     declareOption(ol, "impute_missing", &GaussMix::impute_missing,
00197                                         OptionBase::buildoption,
00198         "If true, missing values will be imputed with their conditional mean\n"
00199         "when computing the covariance matrix. Note that even if the current\n"
00200         "default value of this option is false, the 'true' EM algorithm\n"
00201         "requires it to be set to true.");
00202 
00203     declareOption(ol, "kmeans_iterations", &GaussMix::kmeans_iterations,
00204                                            OptionBase::buildoption,
00205         "Maximum number of iterations performed in initial K-means.");
00206 
00207     declareOption(ol, "alpha_min", &GaussMix::alpha_min,
00208                                    OptionBase::buildoption,
00209         "The minimum weight for each Gaussian. Whenever a Gaussian falls\n"
00210         "below 'alpha_min', it is replaced by a new Gaussian. Note that a\n"
00211         "Gaussian may be replaced only once per stage (to avoid cycles).");
00212 
00213     declareOption(ol,"sigma_min", &GaussMix::sigma_min,
00214                                   OptionBase::buildoption,
00215         "The minimum standard deviation allowed. In all computations, any\n"
00216         "standard deviation below 'sigma_min' (or variance below its square)\n"
00217         "will be replaced by 'sigma_min' (or its square). This regularizes\n"
00218         "the Gaussians (and should be neither too high nor too small).");
00219 
00220     declareOption(ol, "epsilon", &GaussMix::epsilon, OptionBase::buildoption,
00221         "A small number to check for near-zero probabilities.");
00222 
00223     // Learnt options.
00224 
00225     declareOption(ol, "alpha", &GaussMix::alpha, OptionBase::learntoption,
00226         "Coefficients of the Gaussians. They sum to 1 and are positive:\n"
00227         "they can be interpreted as priors P(Gaussian j).");
00228 
00229     declareOption(ol, "center", &GaussMix::center, OptionBase::learntoption,
00230         "Mean of each Gaussian, stored in rows.");
00231 
00232     declareOption(ol, "sigma", &GaussMix::sigma, OptionBase::learntoption,
00233         "The standard deviation in all directions, for 'spherical' type.\n");
00234 
00235     declareOption(ol,"diags", &GaussMix::diags, OptionBase::learntoption,
00236         "Element (j,k) is the standard deviation of Gaussian j on the k-th\n"
00237         "dimension, for 'diagonal' type.");
00238 
00239     declareOption(ol, "eigenvalues", &GaussMix::eigenvalues,
00240                                      OptionBase::learntoption,
00241         "The eigenvalues associated with the principal eigenvectors:\n"
00242         "element (j,k) is the k-th eigenvalue of the j-th Gaussian.");
00243 
00244     declareOption(ol, "eigenvectors", &GaussMix::eigenvectors,
00245                                       OptionBase::learntoption,
00246         "Principal eigenvectors of each Gaussian (for the 'general' type).\n"
00247         "Element j is a matrix whose row k is the k-th eigenvector of the\n"
00248         "j-th Gaussian.");
00249 
00250     /*
00251     declareOption(ol, "log_coeff", &GaussMix::log_coeff,
00252                                    OptionBase::nosave,
00253         "The logarithm of the constant part in the joint Gaussian density:\n"
00254         "log(1/sqrt(2*pi^D * Det(C))).");
00255 
00256     declareOption(ol, "log_p_j_x", &GaussMix::log_p_j_x,
00257                                    OptionBase::nosave,
00258         "The logarithm of p(j|x), where x is the input part.");
00259 
00260     declareOption(ol, "p_j_x", &GaussMix::p_j_x, OptionBase::nosave,
00261         "The probability p(j|x), where x is the input part (it is computed\n"
00262         "by exp(log_p_j_x)).");
00263     */
00264 
00265     declareOption(ol, "n_eigen_computed", &GaussMix::n_eigen_computed,
00266                                           OptionBase::learntoption,
00267         "Actual number of principal components computed with 'general' type.\n"
00268         "It is either equal to the dimension (when all components are\n"
00269         "computed), or to n_eigen+1.");
00270 
00271     declareOption(ol, "D", &GaussMix::D, OptionBase::learntoption,
00272         "Number of dimensions of the joint distribution.");
00273 
00274     /*
00275     // We should not have to save this (it is computed in setPredictor).
00276     declareOption(ol, "center_y_x", &GaussMix::center_y_x, OptionBase::nosave,
00277         "The expectation E[Y | x] for each Gaussian.");
00278         */
00279 
00280     /*
00281     declareOption(ol, "log_p_x_j_alphaj", &GaussMix::log_p_x_j_alphaj,
00282                                           OptionBase::learntoption,
00283         "The logarithm of p(x|j) * alpha_j, where x is the input part.");
00284 
00285     declareOption(ol, "n_tries", &GaussMix::n_tries, OptionBase::learntoption,
00286         "Element i is the number of iterations needed to complete\n"
00287         "stage i (if > 1, some Gaussian has been replaced).");
00288 
00289     declareOption(ol, "nsamples", &GaussMix::nsamples,
00290                                   OptionBase::learntoption,
00291         "Number of samples in the training set.");
00292 
00293     declareOption(ol, "training_time", &GaussMix::training_time,
00294                                        OptionBase::learntoption,
00295         "Time spent in training the model. If initially set to a negative\n"
00296         "value, it will not be updated during training.");
00297 
00298     declareOption(ol, "conditional_updating_time",
00299                       &GaussMix::conditional_updating_time,
00300                       OptionBase::learntoption,
00301         "Time spent in updating from conditional sorting. If initially set\n"
00302         "to a negative value, it will not be updated during training.");
00303     */
00304 
00305     // Now call the parent class' declareOptions
00306     inherited::declareOptions(ol);
00307 }
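// A condensed sketch of what the 'n_eigen' option means for the 'general'
// type: the covariance of a Gaussian is rebuilt from its leading eigen-pairs,
// and every discarded direction is given the smallest kept eigenvalue
// 'lambda0' (floored by sigma_min^2). This mirrors the reconstruction carried
// out in computeLogLikelihood() below and is shown here only as a reference.
#if 0
void rebuild_general_covariance(const Vec& eigenvals, const Mat& eigenvecs,
                                real var_min, Mat& cov)
{
    int D = eigenvecs.width();
    int n_kept = eigenvals.length();            // = n_eigen_computed
    real lambda0 = max(var_min, eigenvals.lastElement());
    cov.resize(D, D);
    cov.fill(0);
    for (int k = 0; k < n_kept - 1; k++)
        externalProductScaleAcc(cov, eigenvecs(k), eigenvecs(k),
                                max(var_min, eigenvals[k]) - lambda0);
    for (int i = 0; i < D; i++)
        cov(i, i) += lambda0;                   // lambda0 on every direction
}
#endif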
00308 
00310 // build //
00312 void GaussMix::build()
00313 {
00314     inherited::build();
00315     build_();
00316 }
00317 
00319 // build_ //
00321 void GaussMix::build_()
00322 {
00323     // Check type value.
00324     if (type == "spherical") {
00325         type_id = TYPE_SPHERICAL;
00326     } else if (type == "diagonal") {
00327         type_id = TYPE_DIAGONAL;
00328     } else if (type == "general") {
00329         type_id = TYPE_GENERAL;
00330     } else
00331         PLERROR("In GaussMix::build_ - Type '%s' is unknown", type.c_str());
00332 
00333     // Special case for the 'f_eigen' option: 1 means we keep everything.
00334     PLASSERT( f_eigen >= 0 && f_eigen <= 1 );
00335     if (is_equal(f_eigen, 1))
00336         n_eigen = -1;
00337 
00338     // Guess values for 'D' and 'n_eigen_computed' if they are not provided
00339     // (this could be the case for instance when specifying 'by hand' the
00340     // parameters of the mixture of Gaussians).
00341     // Make also a few checks to ensure all values are coherent.
00342     if (stage > 0) {
00343         PLASSERT( D == -1 || D == center.width() );
00344         if (D == -1)
00345             D = center.width();
00346         PLASSERT( n_eigen_computed == -1 ||
00347                 n_eigen_computed == eigenvalues.width() );
00348         if (n_eigen_computed == -1)
00349             n_eigen_computed = eigenvalues.width();
00350         PLASSERT( n_eigen == -1 || n_eigen_computed <= n_eigen + 1 );
00351         PLASSERT( n_eigen_computed <= D );
00352     }
00353 
00354     // Make sure everything is correctly resized before using the object.
00355     resizeDataBeforeUsing();
00356 
00357     // If the learner is ready to be used, we need to precompute the logarithm
00358     // of the constant coefficient of each Gaussian.
00359     if (stage > 0)
00360         precomputeAllGaussianLogCoefficients();
00361 
00362     // Make GaussMix-specific operations for conditional distributions.
00363     GaussMix::setPredictorPredictedSizes(predictor_size, predicted_size, false);
00364     GaussMix::setPredictor(predictor_part, false);
00365 }
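// Worked example of the guessing above, for a 'general' mixture whose
// parameters were specified by hand: with 'center' of size L x 5,
// 'eigenvalues' of size L x 4 and stage > 0, build_() infers D = 5 and
// n_eigen_computed = 4, which satisfies the coherence checks
// n_eigen_computed <= n_eigen + 1 (for n_eigen = 3 or n_eigen = -1) and
// n_eigen_computed <= D.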
00366 
00368 // changeOptions //
00370 void GaussMix::changeOptions(const map<string,string>& name_value)
00371 {
00372     // When 'n_eigen' is changed for a learner that is already trained, we need
00373     // to call forget(), otherwise some asserts may fail during a subsequent
00374     // build.
00375     if (stage > 0 && (name_value.find("n_eigen") != name_value.end() ||
00376                       name_value.find("L")       != name_value.end() ||
00377                       name_value.find("seed")    != name_value.end() ||
00378                       name_value.find("sigma_min")!=name_value.end() ||
00379                       name_value.find("type")    != name_value.end() ))
00380         forget();
00381     inherited::changeOptions(name_value);
00382 }
00383 
00385 // computeMeansAndCovariances //
00387 void GaussMix::computeMeansAndCovariances() {
00388     //Profiler::start("computeMeansAndCovariances");
00389     VMat weighted_train_set;
00390     Vec sum_columns(L);
00391     Vec storage_D(D);
00392     columnSum(posteriors, sum_columns);
00393     for (int j = 0; j < L; j++) {
00394         // Build the weighted dataset.
00395         if (sum_columns[j] < epsilon)
00396             PLWARNING("In GaussMix::computeMeansAndCovariances - A posterior "
00397                       "is almost zero");
00398         PLASSERT( !updated_weights(j).hasMissing() );
00399         VMat weights(columnmatrix(updated_weights(j)));
00400         bool use_impute_missing = impute_missing && stage > 0;
00401         VMat input_data = use_impute_missing ? imputed_missing[j]
00402                                              : train_set;
00403 
00404         /*
00405         input_data->saveAMAT("/u/delallea/tmp/input_data_" +
00406                 tostring(this->stage) + ".amat", false, true);
00407         */
00408 
00409         weighted_train_set = new ConcatColumnsVMatrix(
00410             new SubVMatrix(input_data, 0, 0, nsamples, D), weights);
00411         weighted_train_set->defineSizes(D, 0, 1);
00412         Vec center_j = center(j);
00413         if (type_id == TYPE_SPHERICAL) {
00414             computeInputMeanAndVariance(weighted_train_set, center_j,
00415                                                             storage_D);
00416             // TODO Would it be better to use a harmonic mean?
00417             sigma[j] = sqrt(mean(storage_D));
00418             if (isnan(sigma[j]))
00419                 PLERROR("In GaussMix::computeMeansAndCovariances - A "
00420                         "standard deviation is 'nan'");
00421         } else if (type_id == TYPE_DIAGONAL ) {
00422             computeInputMeanAndStddev(weighted_train_set, center_j,
00423                                                           storage_D);
00424             diags(j) << storage_D;
00425             if (storage_D.hasMissing())
00426                 PLERROR("In GaussMix::computeMeansAndCovariances - A "
00427                         "standard deviation is 'nan'");
00428         } else {
00429             PLASSERT( type_id == TYPE_GENERAL );
00430             //Profiler::start("computeInputMeanAndCovar");
00431             computeInputMeanAndCovar(weighted_train_set, center_j, covariance);
00432             //Profiler::end("computeInputMeanAndCovar");
00433             if (use_impute_missing) {
00434                 // Need to add the extra contributions.
00435                 if (sum_of_posteriors[j] > 0) {
00436                     error_covariance[j] /= sum_of_posteriors[j];
00437                     PLASSERT( covariance.isSymmetric() );
00438                     PLASSERT( error_covariance[j].isSymmetric() );
00439                     covariance += error_covariance[j];
00440                     PLASSERT( covariance.isSymmetric() );
00441                 }
00442             }
00443             if (center_j.hasMissing()) {
00444                 // There are features missing in all points assigned to this
00445                 // Gaussian. We sample a new random value for these features.
00446                 for (int i = 0; i < D; i++)
00447                     if (is_missing(center_j[i])) {
00448                         center_j[i] =
00449                             random_gen->gaussian_mu_sigma(mean_training  [i],
00450                                                           stddev_training[i]);
00451 #ifdef BOUNDCHECK
00452                         // Sanity check: the corresponding row and column in
00453                         // the covariance matrix should be missing.
00454                         for (int k = 0; k < D; k++) {
00455                             if (!is_missing(covariance(i,k)) ||
00456                                     !is_missing(covariance(k,i)))
00457                                 PLERROR(
00458                                     "In GaussMix::computeMeansAndCovariances -"
00459                                     " Expected a missing value in covariance");
00460                         }
00461 #endif
00462                     }
00463             }
00464             if (covariance.hasMissing())
00465                 // The covariance matrix may have some missing values when not
00466                 // enough samples were seen to get simultaneous observations of
00467                 // some pairs of features.
00468                 // Those missing values are replaced with zero.
00469                 for (int i = 0; i < D; i++)
00470                     for (int k = i; k < D; k++)
00471                         if (is_missing(covariance(i,k))) {
00472                             covariance(i,k) = 0;
00473                             PLASSERT( is_missing(covariance(k,i)) ||
00474                                     covariance(k,i) == 0 );
00475                             covariance(k,i) = 0;
00476                         }
00477 #ifdef BOUNDCHECK
00478 
00479             // At this point there should be no more missing values.
00480             if (covariance.hasMissing() || center.hasMissing())
00481                 PLERROR("In GaussMix::computeMeansAndCovariances - Found "
00482                         "missing values when computing weighted mean and "
00483                         "covariance");
00484 #endif
00485             // 'eigenvals' points to the eigenvalues of the j-th Gaussian.
00486             Vec eigenvals = eigenvalues(j);
00487             eigenVecOfSymmMat(covariance, n_eigen_computed, eigenvals,
00488                                                             eigenvectors[j]);
00489             PLASSERT( eigenvals.length() == n_eigen_computed );
00490 
00491             // Currently, the covariance matrix returned is not
00492             // guaranteed to be positive semi-definite. Thus we need to ensure
00493             // it is the case, by thresholding the negative eigenvalues to the
00494             // smallest positive one.
00495             for (int i = n_eigen_computed - 1; i >= 0; i--)
00496                 if (eigenvals[i] > 0) {
00497                     for (int k = i + 1; k < n_eigen_computed; k++)
00498                         eigenvals[k] = eigenvals[i];
00499                     break;
00500                 }
00501         }
00502     }
00503     //Profiler::end("computeMeansAndCovariances");
00504 }
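// Minimal restatement of the statistics gathered above for one Gaussian j
// (illustrative only): the mean and, for the 'diagonal' type, the variances
// are posterior-weighted moments of the training samples. Missing values,
// the sigma_min floor and the imputation correction handled above are all
// ignored in this sketch.
#if 0
void weighted_mean_and_var(const Mat& samples, const Vec& weights,
                           Vec& mean, Vec& var)
{
    int n = samples.length(), d = samples.width();
    real w_sum = sum(weights);
    mean.resize(d); mean.fill(0);
    var.resize(d);  var.fill(0);
    for (int i = 0; i < n; i++)
        for (int k = 0; k < d; k++)
            mean[k] += weights[i] * samples(i, k);
    for (int k = 0; k < d; k++)
        mean[k] /= w_sum;
    for (int i = 0; i < n; i++)
        for (int k = 0; k < d; k++)
            var[k] += weights[i] * square(samples(i, k) - mean[k]);
    for (int k = 0; k < d; k++)
        var[k] /= w_sum;
}
#endif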
00505 
00507 // updateCholeskyFromPrevious //
00509 void GaussMix::updateCholeskyFromPrevious(
00510         const Mat& chol_previous, Mat& chol_updated,
00511         const Mat& full_matrix,
00512         const TVec<int>& indices_previous, const TVec<int>& indices_updated)
00513         const
00514 {
00515     //Profiler::start("updateCholeskyFromPrevious");
00516     static TVec<bool> is_previous;
00517     static TVec<bool> is_updated;
00518     static TVec<int> indices_new;
00519     static Vec new_row;
00520     PLASSERT( chol_previous.length() == indices_previous.length() );
00521     if (indices_updated.isEmpty()) {
00522         // All values are missing: the returned matrix should be empty.
00523         chol_updated.resize(0, 0);
00524         //Profiler::end("updateCholeskyFromPrevious");
00525         return;
00526     }
00527     // Initialization.
00528     int n = chol_previous.length();
00529     int max_indice = -1;
00530     if (!indices_previous.isEmpty())
00531         max_indice = max(max_indice, max(indices_previous));
00532     if (!indices_updated.isEmpty())
00533         max_indice = max(max_indice, max(indices_updated));
00534     PLASSERT( max_indice >= 0 );
00535     is_updated.resize(max_indice + 1);
00536     is_previous.resize(max_indice + 1);
00537     is_updated.fill(false);
00538     is_previous.fill(false);
00539     indices_new.resize(0);
00540     // Find which indices need to be kept or appended.
00541     int p = indices_updated.length();
00542     for (int i = 0; i < p; i++)
00543         is_updated[indices_updated[i]] = true;
00544     for (int i = 0; i < n; i++)
00545         is_previous[indices_previous[i]] = true;
00546     // Delete unused dimensions from the Cholesky decomposition.
00547     //Profiler::start("updateCholeskyFromPrevious - Removing dimensions");
00548     chol_updated.resize(n, n);
00549     chol_updated << chol_previous;
00550     for (int i = n - 1; i >= 0; i--) {
00551         int dim_to_del = indices_previous[i];
00552         if (is_updated[dim_to_del])
00553             indices_new.append(dim_to_del);
00554         else
00555             choleskyRemoveDimension(chol_updated, i);
00556     }
00557     //Profiler::end("updateCholeskyFromPrevious - Removing dimensions");
00558     // Need to swap 'indices_new' since these indices have been added in the
00559     // opposite order.
00560     indices_new.swap();
00561     // Now add dimensions that were not here previously.
00562     //Profiler::start("updateCholeskyFromPrevious - Adding dimensions");
00563     for (int i = 0; i < p; i++)
00564         if (!is_previous[indices_updated[i]]) {
00565             int dim_to_add = indices_updated[i];
00566             indices_new.append(dim_to_add);
00567             int q = indices_new.length();
00568             new_row.resize(q);
00569             for (int j = 0; j < q; j++)
00570                 new_row[j] = full_matrix(dim_to_add, indices_new[j]);
00571             choleskyAppendDimension(chol_updated, new_row);
00572         }
00573     //Profiler::end("updateCholeskyFromPrevious - Adding dimensions");
00574     // Finally update the 'indices_updated' list.
00575     indices_updated << indices_new;
00576     //Profiler::end("updateCholeskyFromPrevious");
00577 }
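// What appending one dimension to a Cholesky factor amounts to (this is the
// operation performed by choleskyAppendDimension() from Cholesky_utils.h; the
// sketch below is a plain restatement of the standard formula, not the
// routine actually called): if L L' = A and the new row of the extended
// matrix is (a', d), the new row of the factor is (l', sqrt(d - l'l)), where
// l solves the triangular system L l = a.
#if 0
void append_dimension(const Mat& chol_old, const Vec& new_row, Mat& chol_new)
{
    int n = chol_old.length();          // chol_new must be (n+1) x (n+1)
    chol_new.subMat(0, 0, n, n) << chol_old;
    Vec l = chol_new(n).subVec(0, n);   // new bottom row (first n entries)
    for (int i = 0; i < n; i++) {       // forward substitution: L l = a
        real s = new_row[i];
        for (int k = 0; k < i; k++)
            s -= chol_old(i, k) * l[k];
        l[i] = s / chol_old(i, i);
    }
    for (int i = 0; i < n; i++)
        chol_new(i, n) = 0;             // keep the factor lower-triangular
    chol_new(n, n) = sqrt(new_row[n] - dot(l, l));
}
#endif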
00578 
00580 // updateInverseVarianceFromPrevious //
00582 // TODO Document
00583 // Also, note that 'ind_dst' is going to be modified to reflect the reordering
00584 // of dimensions.
00585 void GaussMix::updateInverseVarianceFromPrevious(
00586         const Mat& src, Mat& dst, const Mat& full,
00587         const TVec<int>& ind_src, const TVec<int>& ind_dst,
00588         real* src_log_det, real* dst_log_det) const
00589 {
00590     // The i-th element of 'is_src' ('is_dst') indicates whether the i-th
00591     // dimension is in the 'ind_src'('ind_dst') vector.
00592     static TVec<bool> is_src;
00593     static TVec<bool> is_dst;
00594 
00595     static TVec<int> dim_common;    // List of common dimensions.
00596     static TVec<int> dim_src_only;  // List of dimensions only in 'src'.
00597     static TVec<int> dim_dst_only;  // List of dimensions only in 'dst'.
00598     // List of dimensions in 'src' after it has been reordered so that the
00599     // common dimensions are first.
00600     static TVec<int> dim_reordered_src;
00601 
00602     // A copy of the 'src' matrix, but whose dimensions have been swapped to
00603     // match the order in 'dim_reordered_src'.
00604     static Mat src_reordered;
00605     
00606     // Temporary storage matrices.
00607     static Mat tmp;
00608     static Mat tmp2; 
00609 
00610     // This matrix will contain the inverse covariance corresponding to the
00611     // removal of dimensions who do not appear in 'ind_dst' (thus, it is the
00612     // final result if no dimension has to be added, otherwise it is just an
00613     // intermediate result).
00614     static Mat dst_only_removed;
00615 
00616     // Matrix storing the bottom-right part of the reordered source matrix
00617     // (corresponding to dimensions that need to be removed).
00618     static Mat B3;
00619 
00620     // Work matrices.
00621     static Mat W;
00622     static Mat P;
00623     static Mat B;
00624 
00625     // Safety checks.
00626     PLASSERT( src.length() == ind_src.length() );
00627     PLASSERT( (src_log_det  &&  dst_log_det) ||
00628             (!src_log_det && !dst_log_det) );
00629 
00630     if (src_log_det)
00631         // Initialize destination determinant to the source one.
00632         *dst_log_det = *src_log_det;
00633 
00634     int n = ind_src.length();
00635     int p = ind_dst.length();
00636     // int m = full.length();
00637     dst.resize(p, p);
00638     // Analyze the indices vectors.
00639     int max_indice = -1;
00640     if (!ind_src.isEmpty())
00641         max_indice = max(max_indice, max(ind_src));
00642     if (!ind_dst.isEmpty())
00643         max_indice = max(max_indice, max(ind_dst));
00644     // Note that 'max_indice' can be -1. This can currently happen if
00645     // the first sample in a cluster has no missing value.
00646     // In this case there is nothing to do: 'dst' will be empty.
00647     is_dst.resize(max_indice + 1);
00648     is_src.resize(max_indice + 1);
00649     is_dst.fill(false);
00650     is_src.fill(false);
00651     for (int i = 0; i < p; i++)
00652         is_dst[ind_dst[i]] = true;
00653     for (int i = 0; i < n; i++)
00654         is_src[ind_src[i]] = true;
00655     // Build the source inverse covariance matrix where dimensions are
00656     // reordered so that the first dimensions are those in common between
00657     // source and destination.
00658     dim_common.resize(0);
00659     dim_src_only.resize(0);
00660     dim_reordered_src.resize(n);
00661     for (int i = 0; i < n; i++) {
00662         if (is_dst[ind_src[i]])
00663             dim_common.append(i);
00664         else
00665             dim_src_only.append(i);
00666     }
00667     dim_reordered_src.subVec(0, dim_common.length()) << dim_common;
00668     dim_reordered_src.subVec(dim_common.length(), dim_src_only.length())
00669         << dim_src_only;
00670     src_reordered.setMod(dim_reordered_src.length());
00671     src_reordered.resize(dim_reordered_src.length(),
00672                          dim_reordered_src.length());
00673     for (int i = 0; i < dim_reordered_src.length(); i++) {
00674         int dim_reordered_src_i = dim_reordered_src[i];
00675         src_reordered(i, i) = src(dim_reordered_src_i, dim_reordered_src_i);
00676         for (int j = i + 1; j < dim_reordered_src.length(); j++) {
00677             real elem_i_j = src(dim_reordered_src_i, dim_reordered_src[j]);
00678             src_reordered(i, j) = elem_i_j;
00679             src_reordered(j, i) = elem_i_j;
00680         }
00681     }
00682         /* Old code doing the same thing.
00683     tmp.resize(src.length(), dim_reordered_src.length());
00684     // TODO Not efficient! Optimize!
00685     selectColumns(src, dim_reordered_src, tmp);
00686     src_reordered.resize(n, n);
00687     selectRows(tmp, dim_reordered_src, src_reordered);
00688     */
00689 
00690     // Remove the dimensions that are not present in the destination
00691     // matrix.
00692     int n_common = dim_common.length();
00693     dst_only_removed.resize(n_common, n_common);
00694     int n_src_only = dim_src_only.length();
00695     if (n_src_only == 0) {
00696         // Nothing to remove.
00697         dst_only_removed << src_reordered;
00698     } else {
00699         // Compute the matrix corresponding to the removal of the dimensions
00700         // that appear only in the source matrix.
00701         PLASSERT( src_reordered.isSymmetric() );
00702         Mat B1 = src_reordered.subMat(0, 0, n_common, n_common);
00703         Mat B2 = src_reordered.subMat(0, n_common, n_common, n_src_only);
00704         B3.setMod(n_src_only);
00705         B3.resize(n_src_only, n_src_only);
00706         B3 << src_reordered.subMat(n_common, n_common, n_src_only, n_src_only);
00707         PLASSERT( B3.isSymmetric() );
00708         dst_only_removed << B1;
00709         tmp.resize(B3.length(), B3.width());
00710         matInvert(B3, tmp);
00711         // Another assert commented out because it may fail due to numerical
00712         // imprecision.
00713         // PLASSERT( tmp.isSymmetric(false) );
00714         fillItSymmetric(tmp);
00715         tmp2.resize(tmp.length(), B2.length());
00716         productTranspose(tmp2, tmp, B2);
00717         tmp.resize(B2.length(), tmp2.width());
00718         product(tmp, B2, tmp2);
00719         dst_only_removed -= tmp;
00720         // Another assert commented out because it may fail due to numerical
00721         // imprecision.
00722         // PLASSERT( dst_only_removed.isSymmetric(false, true) );
00723         fillItSymmetric(dst_only_removed);
00724         // Update the log-determinant if needed.
00725         if (src_log_det) {
00726             //Profiler::start("det when removing");
00727             *dst_log_det += det(src_reordered.subMat(n_common, n_common,
00728                                                      n_src_only, n_src_only),
00729                                 true);
00730             //Profiler::end("det when removing");
00731         }
00732     }
00733 
00734     // At this point, the dimensions that are not present in the
00735     // destination matrix have been removed. Now, we need to add the
00736     // dimensions that need to be added (those that are present in the
00737     // destination but not in the source).
00738     dim_dst_only.resize(0);
00739     for (int i = 0; i < p; i++)
00740         if (!is_src[ind_dst[i]])
00741             dim_dst_only.append(ind_dst[i]);
00742     int n_dst_only = dim_dst_only.length();
00743     // Reorder properly the indices in 'ind_dst': first the common indices,
00744     // then those only in 'dst'.
00745     for (int i = 0; i < n_common; i++)
00746         ind_dst[i] = ind_src[dim_common[i]];
00747     for (int i = 0; i < n_dst_only; i++)
00748         ind_dst[i + n_common] = dim_dst_only[i];
00749     // Replace dimensions in 'src' by dimensions in the full matrix.
00750     for (int i = 0; i < dim_common.length(); i++)
00751         dim_common[i] = ind_src[dim_common[i]];
00752     if (n_dst_only == 0) {
00753         // No dimension to add.
00754         dst << dst_only_removed;
00755     } else {
00756         // TODO This is probably not very efficient, and could be optimized.
00757         tmp.resize(full.length(), dim_dst_only.length());
00758         selectColumns(full, dim_dst_only, tmp);
00759         W.resize(dim_common.length(), tmp.width());
00760         selectRows(tmp, dim_common, W);
00761         P.resize(dim_dst_only.length(), tmp.width());
00762         selectRows(tmp, dim_dst_only, P);
00763         B.resize(W.width(), dst_only_removed.width());
00764         transposeProduct(B, W, dst_only_removed);
00765         tmp.setMod(W.width());
00766         tmp.resize(B.length(), W.width());
00767         // It can happen that n_common == 0, i.e. there are no common
00768         // dimensions. In such a case, P contains the desired covariance.
00769         if (n_common > 0) {
00770             product(tmp, B, W);
00771             negateElements(tmp);
00772         } else
00773             tmp.fill(0);
00774         tmp += P;
00775         tmp2.resize(tmp.length(), tmp.width());
00776         // Commented-out as it may cause an unwanted crash.
00777         // PLASSERT( tmp.isSymmetric(false, true) );
00778         fillItSymmetric(tmp);
00779         matInvert(tmp, tmp2);
00780         // Commented-out as it may cause an unwanted crash.
00781         // PLASSERT( tmp2.isSymmetric(false) );
00782         fillItSymmetric(tmp2);
00783         dst.subMat(n_common, n_common, n_dst_only, n_dst_only) << tmp2;
00784         if (n_common > 0) {
00785             tmp.resize(B.width(), tmp2.width());
00786             transposeProduct(tmp, B, tmp2);
00787             tmp2.resize(tmp.length(), B.width());
00788             product(tmp2, tmp, B);
00789             negateElements(tmp);
00790             dst.subMat(0, n_common, n_common, n_dst_only) << tmp;
00791             Mat dst_top_left = dst.subMat(0, 0, n_common, n_common);
00792             dst_top_left << tmp2;
00793             dst_top_left += dst_only_removed;
00794         }
00795         // Update the log-determinant if needed.
00796         if (src_log_det) {
00797             //Profiler::start("det when adding");
00798             *dst_log_det -= det(dst.subMat(n_common,   n_common,
00799                                            n_dst_only, n_dst_only), true);
00800             //Profiler::end("det when adding");
00801         }
00802     }
00803     // Ensure 'dst' is symmetric, since we did not fill the bottom-left block.
00804     fillItSymmetric(dst);
00805 }
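// The dimension-removal step above rests on the standard identity for a
// partitioned inverse covariance matrix: writing the source precision over
// (kept, removed) dimensions as
//
//     Lambda = [ B1   B2 ]
//              [ B2'  B3 ]
//
// the precision of the marginal distribution over the kept dimensions is the
// Schur complement B1 - B2 * B3^{-1} * B2', which is exactly what is stored
// in 'dst_only_removed'. The dimension-addition step applies the same lemma
// in the other direction, using the corresponding blocks of the full
// covariance matrix 'full'.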
00806 
00808 // addToCovariance //
00810 void GaussMix::addToCovariance(const Vec& y, int j,
00811                                const Mat& cov, real post)
00812 {
00813     //Profiler::start("addToCovariance");
00814     PLASSERT( y.length() == cov.length() && y.length() == cov.width() );
00815     PLASSERT( n_predictor == 0 );
00816     PLASSERT( impute_missing );
00817     static TVec<int> coord_missing;
00818     static Mat inv_cov_y_missing;
00819     static Mat H_inv_tpl;
00820     static TVec<int> ind_inv_tpl;
00821     static Mat H_inv_tot;
00822     static TVec<int> ind_inv_tot;
00823 
00824     coord_missing.resize(0);
00825     for (int k = 0; k < y.length(); k++)
00826         if (is_missing(y[k]))
00827             coord_missing.append(k);
00828 
00829     Mat& inv_cov_y = joint_inv_cov[j];
00830     if (previous_training_sample == -1) {
00831         int n_missing = coord_missing.length();
00832         inv_cov_y_missing.setMod(n_missing);
00833         inv_cov_y_missing.resize(n_missing, n_missing);
00834         for (int k = 0; k < n_missing; k++)
00835             for (int q = 0; q < n_missing; q++)
00836                 inv_cov_y_missing(k,q) =
00837                     inv_cov_y(coord_missing[k], coord_missing[q]);
00838         cond_var_inv_queue.resize(1);
00839         Mat& cond_inv = cond_var_inv_queue[0];
00840         cond_inv.resize(inv_cov_y_missing.length(), inv_cov_y_missing.width());
00841         matInvert(inv_cov_y_missing, cond_inv);
00842         // Take care of numerical imprecisions that may cause the inverse not
00843         // to be exactly symmetric.
00844         PLASSERT( cond_inv.isSymmetric(false, true) );
00845         fillItSymmetric(cond_inv);
00846         indices_inv_queue.resize(1);
00847         TVec<int>& ind = indices_inv_queue[0];
00848         ind.resize(n_missing);
00849         ind << coord_missing;
00850     }
00851 
00852     int path_index =
00853         sample_to_path_index[current_training_sample];
00854     int queue_index;
00855     if (spanning_use_previous[current_cluster][path_index])
00856         queue_index = cond_var_inv_queue.length() - 1;
00857     else
00858         queue_index = cond_var_inv_queue.length() - 2;
00859 
00860     H_inv_tpl = cond_var_inv_queue[queue_index];
00861     ind_inv_tpl = indices_inv_queue[queue_index];
00862     int n_inv_tpl = H_inv_tpl.length();
00863     H_inv_tot.resize(n_inv_tpl, n_inv_tpl);
00864     ind_inv_tot = coord_missing;
00865 
00866     bool same_covariance = no_missing_change[current_training_sample];
00867 
00868     if (!same_covariance)
00869         updateInverseVarianceFromPrevious(H_inv_tpl, H_inv_tot,
00870                 joint_inv_cov[j], ind_inv_tpl, ind_inv_tot);
00871 
00872     Mat* the_H_inv = same_covariance ? &H_inv_tpl : &H_inv_tot;
00873     TVec<int>* the_ind_inv = same_covariance? &ind_inv_tpl : &ind_inv_tot;
00874 
00875     // Add this matrix (weighted by the coefficient 'post') to the given 'cov'
00876     // full matrix.
00877     for (int i = 0; i < the_ind_inv->length(); i++) {
00878         int the_ind_inv_i = (*the_ind_inv)[i];
00879         for (int k = 0; k < the_ind_inv->length(); k++)
00880             cov(the_ind_inv_i, (*the_ind_inv)[k]) += post * (*the_H_inv)(i, k);
00881     }
00882 
00883     bool cannot_free =
00884         !spanning_can_free[current_cluster][path_index];
00885     if (cannot_free)
00886         queue_index++;
00887     cond_var_inv_queue.resize(queue_index + 1);
00888     indices_inv_queue.resize(queue_index + 1);
00889 
00890     static Mat dummy_mat;
00891     H_inv_tpl = dummy_mat;
00892 
00893     if (!same_covariance || cannot_free) {
00894         Mat& M = cond_var_inv_queue[queue_index];
00895         M.resize(H_inv_tot.length(), H_inv_tot.width());
00896         M << H_inv_tot;
00897         TVec<int>& ind = indices_inv_queue[queue_index];
00898         ind.resize(the_ind_inv->length());
00899         ind << *the_ind_inv;
00900     }
00901 
00902     //Profiler::end("addToCovariance");
00903 }
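// Identity behind the matrix added above: for a joint Gaussian with precision
// matrix Lambda (here joint_inv_cov[j]), the conditional covariance of the
// missing coordinates given the observed ones is (Lambda_mm)^{-1}, i.e. the
// inverse of the block of Lambda restricted to the missing coordinates. When
// missing values are imputed by their conditional mean ('impute_missing'),
// the sample covariance computed from the imputed data lacks precisely this
// term, so it is accumulated here, weighted by the posterior 'post' of
// Gaussian j.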
00904 
00906 // computeLogLikelihood //
00908 real GaussMix::computeLogLikelihood(const Vec& y, int j, bool is_predictor) const
00909 {
00910     //Profiler::start("computeLogLikelihood");
00911     static int size;    // Size of the vector whose density is computed.
00912     // Index where we start (usually 0 when 'is_predictor', and 'n_predictor'
00913     // otherwise).
00914     static int start;
00915     // Storage of mean.
00916     static Vec mu_y;
00917     static Vec mu;
00918 
00919     static Vec diag_j; // Points to the standard deviations of Gaussian j.
00920 
00921     // Used to point to the correct eigenvalues / eigenvectors.
00922     static Vec eigenvals;
00923     static Mat eigenvecs;
00924 
00925     // Stuff when there are missing values: we need to do a lot more
00926     // computations (with the current rather dumb implementation).
00927     static Vec mu_y_missing;
00928     static Mat cov_y_missing;
00929     static Mat dummy_storage;
00930     static TVec<Mat> covs_y_missing;
00931     static TVec<Vec> mus_y_missing;
00932     static Vec y_missing;
00933     static Vec eigenvals_missing;
00934     static TVec<Vec> eigenvals_allj_missing;
00935     static Mat* eigenvecs_missing;
00936     static Mat eigenvecs_missing_storage;
00937     static TVec<Mat> eigenvecs_allj_missing;
00938     static TVec<int> non_missing;
00939     static Mat work_mat1, work_mat2;
00940     static Mat eigenvalues_x_miss;
00941     static TVec<Mat> eigenvectors_x_miss;
00942     static Mat full_cov;
00943     static Mat cov_x_j;
00944     static Vec y_non_missing;
00945     static Vec center_non_missing;
00946     static Mat cov_y_x;
00947 
00948     // Dummy matrix and vector to release some storage pointers so that some
00949     // matrices can be resized.
00950     static Mat dummy_mat;
00951     static Vec dummy_vec;
00952 
00953     eigenvecs_missing = &eigenvecs_missing_storage;
00954 
00955     Mat* the_cov_y_missing = &cov_y_missing;
00956     Vec* the_mu_y_missing = &mu_y_missing;
00957 
00958     // Will contain the final result (the desired log-likelihood).
00959     real log_likelihood;
00960 
00961     if (type_id == TYPE_SPHERICAL || type_id == TYPE_DIAGONAL) {
00962         // Easy case: the covariance matrix is diagonal.
00963         if (is_predictor) {
00964             size = n_predictor;
00965             start  = 0;
00966         } else {
00967             size = n_predicted;
00968             start = n_predictor;
00969         }
00970         mu_y = center(j).subVec(start, size);
00971         if (type_id == TYPE_DIAGONAL) {
00972             PLASSERT( diags.length() == L && diags.width() == n_predictor+n_predicted );
00973             diag_j = diags(j).subVec(start, size);
00974         }
00975         log_likelihood = 0;
00976         // x   ~= N(mu_x, cov (diagonal))
00977         // y|x ~= N(mu_y, cov (diagonal))
00978         for (int k = 0; k < size; k++)
00979             if (!is_missing(y[k])) {
00980                 real stddev =
00981                     type_id == TYPE_SPHERICAL ? sigma[j]
00982                                               : diag_j[k];
00983                 stddev = max(sigma_min, stddev);
00984                 log_likelihood +=
00985                     gauss_log_density_stddev(y[k], mu_y[k], stddev);
00986             }
00987     } else {
00988         PLASSERT( type_id == TYPE_GENERAL );
00989         log_likelihood = 0; // Initialize result to zero.
00990         // TODO Put both cases (n_predictor == 0 and other) in same code (they are
00991         // very close to each other).
00992         if (n_predictor == 0) {
00993             // Simple case: there is no predictor part.
00994             PLASSERT( !is_predictor );
00995             PLASSERT( y.length() == n_predicted );
00996 
00997             // When not in training mode, 'previous_training_sample' is set to
00998             // -2, and 'current_training_sample' is set to -1.
00999             // In such a case, it is not necessary to do all computations.
01000             // TODO It would be good to have one single flag for both lines
01001             // below. Maybe current_training_sample != -1 would be enough?
01002             bool eff_missing = (efficient_missing == 1 ||
01003                                 efficient_missing == 3        ) &&
01004                                (previous_training_sample != -2);
01005             bool imp_missing = impute_missing &&
01006                                (current_training_sample != -1);
01007             bool eff_naive_missing = (efficient_missing == 2) &&
01008                                      (current_training_sample != -1);
01009 
01010             if (y.hasMissing() || eff_missing || imp_missing) {
01011                 // TODO This will probably make the 'efficient_missing' method
01012                 // perform slower on data with no missing value. This should be
01013                 // optimized.
01014 
01015                 // We need to recompute almost everything.
01016                 // First the full covariance.
01017                 Mat& cov_y = joint_cov[j];
01018                 Mat* inv_cov_y = impute_missing ? &joint_inv_cov[j] : 0;
01019                 real var_min = square(sigma_min);
01020                 if (stage_joint_cov_computed[j] != this->stage) {
01021                     stage_joint_cov_computed[j] = this->stage;
01022                     cov_y.resize(D, D);
01023                     eigenvals = eigenvalues(j);
01024                     real lambda0 = max(var_min, eigenvals.lastElement());
01025                     cov_y.fill(0);
01026                     Mat& eigenvectors_j = eigenvectors[j];
01027 
01028                     PLASSERT( eigenvectors_j.width() == D );
01029 
01030                     for (int k = 0; k < n_eigen_computed - 1; k++)
01031                         externalProductScaleAcc(
01032                                 cov_y, eigenvectors_j(k), eigenvectors_j(k),
01033                                 max(var_min, eigenvals[k]) - lambda0);
01034 
01035                     for (int i = 0; i < D; i++)
01036                         cov_y(i,i) += lambda0;
01037 
01038                     // By construction, the resulting matrix is symmetric. However,
01039                     // it may happen that it is not exactly the case due to numerical
01040                     // approximations. Thus we ensure it is perfectly symmetric.
01041                     PLASSERT( cov_y.isSymmetric(false) );
01042                     fillItSymmetric(cov_y);
01043 
01044                     if (impute_missing) {
01045                         // We also need to compute the inverse covariance
01046                         // matrix.
01047                         PLASSERT( inv_cov_y );
01048                         inv_cov_y->resize(D, D);
01049                         inv_cov_y->fill(0);
01050                         real l0 = 1 / lambda0;
01051                         for (int k = 0; k < n_eigen_computed - 1; k++)
01052                             externalProductScaleAcc(
01053                                     *inv_cov_y, eigenvectors_j(k),
01054                                     eigenvectors_j(k),
01055                                     1 / max(var_min, eigenvals[k]) - l0);
01056                         for (int i = 0; i < D; i++)
01057                             (*inv_cov_y)(i, i) += l0;
01058                         // For the same reason as above.
01059                         PLASSERT( inv_cov_y->isSymmetric(false) );
01060                         fillItSymmetric(*inv_cov_y);
01061                     }
01062 
01063                     /*
01064                     if (efficient_missing) {
01065                         // Now compute its Cholesky decomposition.
01066                         Mat& chol_cov_y = chol_joint_cov[j];
01067                         choleskyDecomposition(cov_y, chol_cov_y);
01068 
01069                         // And do the same for missing templates.
01070                         TVec<bool> miss_pattern;
01071                         for (int i = 0; i < efficient_k_median; i++) {
01072                             miss_pattern = missing_template(i);
01073                             int n_non_missing = miss_pattern.length();
01074                             non_missing.resize(0);
01075                             for (int k = 0; k < miss_pattern.length(); k++)
01076                                 if (miss_pattern[k])
01077                                     n_non_missing--;
01078                                 else
01079                                     non_missing.append(k);
01080 
01081                             cov_y_missing.resize(n_non_missing, n_non_missing);
01082                             for (int k = 0; k < n_non_missing; k++)
01083                                 for (int q = 0; q < n_non_missing; q++)
01084                                     cov_y_missing(k,q) =
01085                                         cov_y(non_missing[k], non_missing[q]);
01086                             Mat& chol_cov_tpl = chol_cov_template(i, j);
01087                             choleskyDecomposition(cov_y_missing, chol_cov_tpl);
01088                         }
01089                     }
01090                     */
01091                 }
01092                 /*
01093                 // Then extract what we want.
01094                 int tpl_idx;
01095                 TVec<bool> missing_tpl;
01096                 if (efficient_missing) {
01097                 PLASSERT( current_training_sample != -1 );
01098                 tpl_idx =
01099                     sample_to_template[current_training_sample];
01100                 missing_tpl = missing_template(tpl_idx);
01101                 }
01102                 */
01103                 /*
01104                 static TVec<int> com_non_missing, add_non_missing, add_missing;
01105                 com_non_missing.resize(0);
01106                 add_non_missing.resize(0);
01107                 // 'add_missing' will contain those coordinate in the template
01108                 // covariance matrix that need to be deleted (because they are
01109                 // missing in the current template).
01110                 add_missing.resize(0);
01111                 */
01112 
01113                 non_missing.resize(0);
01114                 static TVec<int> coord_missing;
01115                 coord_missing.resize(0);
01116                 // int count_tpl_dim = 0;
01117                 for (int k = 0; k < n_predicted; k++)
01118                     if (!is_missing(y[k]))
01119                         non_missing.append(k);
01120                     else
01121                         coord_missing.append(k);
01122 
01123                 int n_non_missing = non_missing.length();
01124                 if (eff_missing && previous_training_sample == -1) {
01125                     // No previous training sample: we need to compute from
01126                     // scratch the Cholesky decomposition.
01127                     the_cov_y_missing->setMod(n_non_missing);
01128                     the_cov_y_missing->resize(n_non_missing, n_non_missing);
01129                     for (int k = 0; k < n_non_missing; k++)
01130                         for (int q = 0; q < n_non_missing; q++)
01131                             (*the_cov_y_missing)(k,q) =
01132                                 cov_y(non_missing[k], non_missing[q]);
01133                     cholesky_queue.resize(1);
01134                     // pout << "length = " << cholesky_queue.length() << endl;
01135                     Mat& chol = cholesky_queue[0];
01136                     if (efficient_missing == 1)
01137                         choleskyDecomposition(*the_cov_y_missing, chol);
01138                     else {
01139                         PLASSERT( efficient_missing == 3 );
01140                         log_det_queue.resize(1);
01141                         log_det_queue[0] = det(*the_cov_y_missing, true);
01142                         chol.resize(the_cov_y_missing->length(),
01143                                     the_cov_y_missing->length());
01144                         PLASSERT( the_cov_y_missing->isSymmetric() );
01145                         matInvert(*the_cov_y_missing, chol);
01146                         // Commenting-out this assert: it can actually fail due
01147                         // to some numerical imprecisions during matrix
01148                         // inversion, which is a bit annoying.
01149                         // PLASSERT( chol.isSymmetric(false, true) );
01150                         fillItSymmetric(chol);
01151                     }
01152                     indices_queue.resize(1);
01153                     TVec<int>& ind = indices_queue[0];
01154                     ind.resize(n_non_missing);
01155                     ind << non_missing;
01156                 }
01157 
01158                 mu_y = center(j).subVec(0, n_predicted);
01159                 the_mu_y_missing->resize(n_non_missing);
01160                 y_missing.resize(n_non_missing);
01161                 // Fill in first the coordinates which are in the template,
01162                 // then the coordinates specific to this data point.
01163                 /*
01164                 static TVec<int> tot_non_missing;
01165                 if (efficient_missing) {
01166                 tot_non_missing.resize(com_non_missing.length() +
01167                                        add_non_missing.length());
01168                 tot_non_missing.subVec(0, com_non_missing.length())
01169                     << com_non_missing;
01170                 tot_non_missing.subVec(com_non_missing.length(),
01171                                        add_non_missing.length())
01172                     << add_non_missing;
01173                 for (int k = 0; k < tot_non_missing.length(); k++) {
01174                     mu_y_missing[k] = mu_y[tot_non_missing[k]];
01175                     y_missing[k] = y[tot_non_missing[k]];
01176                 }
01177                 }
01178                 */
01179                 if (!eff_missing) {
01180                 if (!eff_naive_missing) {
01181                     dummy_storage.setMod(n_non_missing);
01182                     dummy_storage.resize(n_non_missing, n_non_missing);
01183                     the_cov_y_missing = &dummy_storage;
01184                 } else {
01185                     PLASSERT( efficient_missing == 2 );
01186                     covs_y_missing.resize(L);
01187                     Mat& cov_y_missing_j = covs_y_missing[j];
01188                     cov_y_missing_j.resize(n_non_missing, n_non_missing);
01189                     the_cov_y_missing = &cov_y_missing_j;
01190                     mus_y_missing.resize(L);
01191                     Vec& mu_y_missing_j = mus_y_missing[j];
01192                     mu_y_missing_j.resize(n_non_missing);
01193                     the_mu_y_missing = &mu_y_missing_j;
01194                 }
01195 
01196                 for (int k = 0; k < n_non_missing; k++)
01197                     y_missing[k] = y[non_missing[k]];
01198 
01199                 if (!eff_naive_missing ||
01200                     need_recompute[current_training_sample]) {
01201                 for (int k = 0; k < n_non_missing; k++) {
01202                     (*the_mu_y_missing)[k] = mu_y[non_missing[k]];
01203                     for (int q = 0; q < n_non_missing; q++) {
01204                         (*the_cov_y_missing)(k,q) =
01205                             cov_y(non_missing[k], non_missing[q]);
01206                     }
01207                 }
01208                 }
01209                 }
01210                 /*
01211                 if (n_non_missing == 0) {
01212                     log_likelihood = 0;
01213                 } else {*/
01214                     // Perform SVD of cov_y_missing.
01215                     if (!eff_missing) {
01216                     if (!eff_naive_missing ||
01217                                     need_recompute[current_training_sample]) {
01218                     eigenvals_allj_missing.resize(L);
01219                     eigenvecs_allj_missing.resize(L);
01220                     // TODO We probably do not need this 'cov_backup', since
01221                     // the matrix 'the_cov_y_missing' should not be re-used.
01222                     // Once this is tested and verified, it could be removed
01223                     // for efficiency reasons.
01224                     static Mat cov_backup;
01225                     cov_backup.setMod(the_cov_y_missing->width());
01226                     cov_backup.resize(the_cov_y_missing->length(),
01227                                       the_cov_y_missing->width());
01228                     cov_backup << *the_cov_y_missing;
01229                     eigenVecOfSymmMat(cov_backup, n_non_missing,
01230                             eigenvals_allj_missing[j],
01231                             eigenvecs_allj_missing[j]);
01232 
01233                     PLASSERT( eigenvals_allj_missing[j].length()==n_non_missing);
01234                     PLASSERT( !cov_backup.hasMissing() );
01235                     }
01236                     eigenvals_missing = eigenvals_allj_missing[j];
01237                     eigenvecs_missing = &eigenvecs_allj_missing[j];
01238                     }
01239 
01240                     real log_det = 0;
01241                     static Mat L_tpl;
01242                     static TVec<int> ind_tpl;
01243                     static Mat L_tot;
01244                     static TVec<int> ind_tot;
01245                     int n_tpl = -1;
01246                     int queue_index = -1;
01247                     int path_index = -1;
01248                     bool same_covariance = false;
01249                     real log_det_tot, log_det_tpl;
01250                     if (eff_missing) {
01251                         path_index =
01252                             sample_to_path_index[current_training_sample];
01253                         // pout << "path index = " << path_index << endl;
01254                         L_tot.resize(n_non_missing, n_non_missing);
01255                         if (spanning_use_previous[current_cluster][path_index])
01256                             queue_index = cholesky_queue.length() - 1;
01257                         else
01258                             queue_index = cholesky_queue.length() - 2;
01259                         L_tpl = cholesky_queue[queue_index];
01260                         ind_tpl = indices_queue[queue_index];
01261                         if (efficient_missing == 3)
01262                             log_det_tpl = log_det_queue[queue_index];
01263 
01264                         n_tpl = L_tpl.length();
01265                         L_tot.resize(n_tpl, n_tpl);
01266                         /*
01267                         ind_tot.resize(n_non_missing);
01268                         ind_tot << non_missing;
01269                         */
01270                         ind_tot = non_missing;
01271 
01272                         // Optimization: detect when the same covariance matrix
01273                         // can be re-used.
01274                         // TODO What about just the dimensions being reordered?
01275                         // Are we losing time in such cases?
01276                         same_covariance =
01277                             ind_tpl.length() == ind_tot.length() &&
01278                             previous_training_sample >= 0;
01279                         if (same_covariance)
01280                             for (int i = 0; i < ind_tpl.length(); i++)
01281                                 if (ind_tpl[i] != ind_tot[i]) {
01282                                     same_covariance = false;
01283                                     break;
01284                                 }
01285 
01286                         /*
01287                         Mat tmp;
01288                         if (add_missing.length() > 0) {
01289                             tmp.resize(L_tot.length(), L_tot.width());
01290                             productTranspose(tmp, L_tot, L_tot);
01291                             VMat tmp_vm(tmp);
01292                             tmp_vm->saveAMAT("/u/delallea/tmp/before.amat", false,
01293                                     true);
01294                         }
01295                         */
01296 
01297                         // Remove some rows / columns.
01298                         /*
01299                         int p = add_missing.length() - 1;
01300                         for (int k = p; k >= 0; k--) {
01301                             choleskyRemoveDimension(L_tot, add_missing[k]); //(-k+p);
01302                             */
01303                             /*
01304                             tmp.resize(L_tot.length(), L_tot.width());
01305                             productTranspose(tmp, L_tot, L_tot);
01306                             VMat tmp_vm(tmp);
01307                             tmp_vm->saveAMAT("/u/delallea/tmp/before_" +
01308                                     tostring(add_missing[k]) + ".amat", false,
01309                                     true);
01310                                     */
01311                         /*
01312                         }
01313                         */
01314                     }
01315                     if ((efficient_missing == 1 || efficient_missing == 3) &&
01316                         current_training_sample >= 0)
01317                         no_missing_change[current_training_sample] =
01318                             same_covariance;
01319 
01320                     // Now we must perform updates to compute the Cholesky
01321                     // decomposition of interest.
01322                     static Vec new_vec;
01323                     int n = -1;
01324                     Mat* the_L = 0;
01325                     if (eff_missing) {
01326                     //L_tot.resize(n_non_missing, n_non_missing);
01327                         /*
01328                     for (int k = 0; k < add_non_missing.length(); k++) {
01329                         new_vec.resize(L_tot.length() + 1);
01330                         for (int q = 0; q < new_vec.length(); q++)
01331                             new_vec[q] = cov_y(tot_non_missing[q],
01332                                                add_non_missing[k]);
01333                         choleskyAppendDimension(L_tot, new_vec);
01334                     }
01335                         */
01336                     if (!same_covariance) {
01337                         if (efficient_missing == 1) {
01338                             //Profiler::start("updateCholeskyFromPrevious, em1");
01339                             updateCholeskyFromPrevious(L_tpl, L_tot,
01340                                     joint_cov[j], ind_tpl, ind_tot);
01341                             //Profiler::end("updateCholeskyFromPrevious, em1");
01342                         } else {
01343                             PLASSERT( efficient_missing == 3 );
01344                             //Profiler::start("updateInverseVarianceFromPrevious, em3");
01345                             updateInverseVarianceFromPrevious(L_tpl, L_tot,
01346                                     joint_cov[j], ind_tpl, ind_tot,
01347                                     &log_det_tpl, &log_det_tot);
01348                             //Profiler::end("updateInverseVarianceFromPrevious, em3");
01349 #if 0
01350                             // Check that the inverse is correctly computed.
01351                             VMat L_tpl_vm(L_tpl);
01352                             VMat L_tot_vm(L_tot);
01353                             VMat joint_cov_vm(joint_cov[j]);
01354                             Mat data_tpl(1, ind_tpl.length());
01355                             for (int q = 0; q < ind_tpl.length(); q++)
01356                                 data_tpl(0, q) = ind_tpl[q];
01357                             Mat data_tot(1, ind_tot.length());
01358                             for (int q = 0; q < ind_tot.length(); q++)
01359                                 data_tot(0, q) = ind_tot[q];
01360                             VMat ind_tpl_vm(data_tpl);
01361                             VMat ind_tot_vm(data_tot);
01362                             L_tpl_vm->saveAMAT("/u/delallea/tmp/L_tpl_vm.amat",
01363                                     false, true);
01364                             L_tot_vm->saveAMAT("/u/delallea/tmp/L_tot_vm.amat",
01365                                     false, true);
01366                             joint_cov_vm->saveAMAT("/u/delallea/tmp/joint_cov_vm.amat",
01367                                     false, true);
01368                             ind_tpl_vm->saveAMAT("/u/delallea/tmp/ind_tpl_vm.amat",
01369                                     false, true);
01370                             ind_tot_vm->saveAMAT("/u/delallea/tmp/ind_tot_vm.amat",
01371                                     false, true);
01372 #endif
01373                         }
01374                     }
01375                     // Note: the indices in 'ind_tot' will be changed.
01376 
01377                     // Debug check.
01378                     /*
01379                     static Mat tmp_mat;
01380                     tmp_mat.resize(L_tot.length(), L_tot.length());
01381                     productTranspose(tmp_mat, L_tot, L_tot);
01382                     // pout << "max = " << max(tmp_mat) << endl;
01383                     // pout << "min = " << min(tmp_mat) << endl;
01384                     */
01385                     the_L = same_covariance ? &L_tpl : &L_tot;
01386                     real* the_log_det = same_covariance ? &log_det_tpl
01387                                                         : &log_det_tot;
01388                     n = the_L->length();
01389                     if (efficient_missing == 1) {
01390                         for (int i = 0; i < n; i++)
01391                             log_det += pl_log((*the_L)(i, i));
01392                     } else {
01393                         PLASSERT( efficient_missing == 3 );
01394 #if 0
01395                         VMat the_L_vm(*the_L);
01396                         the_L_vm->saveAMAT("/u/delallea/tmp/L.amat", false,
01397                                 true);
01398 #endif
01399                         if (is_missing(*the_log_det)) {
01400                             // That can happen due to numerical imprecisions.
01401                             // In such a case we have to recompute the
01402                             // determinant and the inverse.
01403                             PLASSERT( !same_covariance );
01404                             the_cov_y_missing->setMod(n_non_missing);
01405                             the_cov_y_missing->resize(n_non_missing, n_non_missing);
01406                             for (int k = 0; k < n_non_missing; k++)
01407                                 for (int q = 0; q < n_non_missing; q++)
01408                                     (*the_cov_y_missing)(k,q) =
01409                                         cov_y(non_missing[k], non_missing[q]);
01410                             *the_log_det = det((*the_cov_y_missing), true);
01411                             matInvert(*the_cov_y_missing, *the_L);
01412                             fillItSymmetric(*the_L);
01413                         }
01414 
01415                         // Note: the log-determinant must be multiplied by 0.5
01416                         // compared to 'efficient_missing == 1', because there the
01417                         // determinant is computed from the Cholesky factor L,
01418                         // whose determinant is the square root of that of the
01419                         // full covariance matrix.
01420                         log_det += 0.5 * *the_log_det;
01421                     }
01422                     PLASSERT( !(isnan(log_det) || isinf(log_det)) );
01423                     log_likelihood = -0.5 * (n * Log2Pi) - log_det;
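                    // At this point 'log_likelihood' holds only the
                    // normalization term of the Gaussian density restricted to
                    // the observed coordinates, i.e. -n/2 log(2*pi)
                    // - 1/2 log|Sigma_obs|; the quadratic term
                    // -1/2 (y - mu)' Sigma_obs^-1 (y - mu) is subtracted
                    // further below.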
01424                     }
01425 
01426                     y_centered.resize(n_non_missing);
01427                     if (!eff_missing) {
01428                     mu_y = *the_mu_y_missing;
01429                     eigenvals = eigenvals_missing;
01430                     eigenvecs = *eigenvecs_missing;
01431 
01432                     y_centered << y_missing;
01433                     y_centered -= mu_y;
01434                     }
01435 
01436                     real* center_j = center[j];
01437                     if (eff_missing) {
01438                         for (int k = 0; k < n_non_missing; k++) {
01439                             int ind_tot_k = ind_tot[k];
01440                             y_centered[k] =
01441                                 y[ind_tot_k] - center_j[ind_tot_k];
01442                         }
01443 
01444                     static Vec tmp_vec1;
01445                     if (impute_missing && current_training_sample >= 0) {
01446                         // We need to store the conditional expectation of the
01447                         // sample missing values.
01448                         static Vec tmp_vec2;
01449                         tmp_vec1.resize(the_L->length());
01450                         tmp_vec2.resize(the_L->length());
01451                         if (efficient_missing == 1)
01452                             choleskySolve(*the_L, y_centered, tmp_vec1, tmp_vec2);
01453                         else {
01454                             PLASSERT( efficient_missing == 3 );
01455                             product(tmp_vec1, *the_L, y_centered);
01456                         }
01457                         static Mat K2;
01458                         int ind_tot_length = ind_tot.length();
01459                         K2.resize(cov_y.length() - ind_tot_length,
01460                                   ind_tot.length());
01461                         for (int i = 0; i < K2.length(); i++)
01462                             for (int k = 0; k < K2.width(); k++)
01463                                 K2(i,k) = cov_y(coord_missing[i],
01464                                                 non_missing[k]);
01465                         static Vec cond_mean;
01466                         cond_mean.resize(coord_missing.length());
01467                         product(cond_mean, K2, tmp_vec1);
01468                         static Vec full_vec;
01469                         // TODO Right now, we store the full data vector. It
01470                         // may be more efficient to only store the missing
01471                         // values.
01472                         full_vec.resize(D);
01473                         full_vec << y;
01474                         for (int i = 0; i < coord_missing.length(); i++)
01475                             full_vec[coord_missing[i]] =
01476                                 cond_mean[i] + center_j[coord_missing[i]];
01477                         clust_imputed_missing[j](path_index) << full_vec;
01478                     }
01479 
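                    // Quadratic term of the log-density. With
                    // 'efficient_missing == 1', 'the_L' is the Cholesky factor
                    // L of the observed covariance, and solving L z = y - mu
                    // gives ||z||^2 = (y - mu)' Sigma^-1 (y - mu). With
                    // 'efficient_missing == 3', 'the_L' is the inverse
                    // covariance itself and the quadratic form is obtained as
                    // dot(y - mu, Sigma^-1 (y - mu)).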
01480                     if (n > 0) {
01481                         if (efficient_missing == 1) {
01482                             tmp_vec1.resize(y_centered.length());
01483                             choleskyLeftSolve(*the_L, y_centered, tmp_vec1);
01484                             log_likelihood -= 0.5 * pownorm(tmp_vec1);
01485                         } else {
01486                             PLASSERT( efficient_missing == 3 );
01487                             log_likelihood -= 0.5 * dot(y_centered, tmp_vec1);
01488                         }
01489                     }
01490                     // Now remember L_tot for subsequent samples.
01491                     // TODO This could probably be optimized to avoid useless
01492                     // copies of the covariance matrix.
01493                     bool cannot_free =
01494                         !spanning_can_free[current_cluster][path_index];
01495                     if (cannot_free)
01496                         queue_index++;
01497                     cholesky_queue.resize(queue_index + 1);
01498                     indices_queue.resize(queue_index + 1);
01499                     if (efficient_missing == 3)
01500                         log_det_queue.resize(queue_index + 1);
01501                     // pout << "length = " << cholesky_queue.length() << endl;
01502 
01503                     // Free the reference to the element of 'cholesky_queue':
01504                     // this is needed because that matrix is going to be resized.
01505                     L_tpl = dummy_mat;
01506 
01507                     if (!same_covariance || cannot_free) {
01508                     Mat& chol = cholesky_queue[queue_index];
01509                     chol.resize(L_tot.length(), L_tot.width());
01510                     chol << L_tot;
01511                     TVec<int>& ind = indices_queue[queue_index];
01512                     ind.resize(ind_tot.length());
01513                     ind << ind_tot;
01514                     if (efficient_missing == 3)
01515                         log_det_queue[queue_index] = log_det_tot;
01516                     }
01517 
01518                     // pout << "queue_index = " << queue_index << endl;
01519 
01520                     }
01521 
01522                     if (!eff_missing) {
01523                     // real squared_norm_y_centered = pownorm(y_centered);
01524                     int n_eig = n_non_missing;
01525 
01526                     real lambda0 = var_min;
01527                     if (!eigenvals.isEmpty() && eigenvals.lastElement() > lambda0)
01528                         lambda0 = eigenvals.lastElement();
01529                     PLASSERT( lambda0 > 0 );
01530                     real one_over_lambda0 = 1.0 / lambda0;
01531 
01532                     log_likelihood = precomputeGaussianLogCoefficient(
01533                             eigenvals, n_non_missing);
01534 
01535                     static Vec y_centered_copy;
01536                     y_centered_copy.resize(y_centered.length());
01537                     y_centered_copy << y_centered; // Backup vector.
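                    // The quadratic form (y - mu)' Sigma^-1 (y - mu) is
                    // accumulated by deflation: each leading eigen-direction
                    // v_k contributes (v_k . (y - mu))^2 / lambda_k and is then
                    // removed from 'y_centered', so that the residual left at
                    // the end lies in the subspace where the variance is
                    // lambda0 and contributes ||residual||^2 / lambda0.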
01538                     for (int k = 0; k < n_eig - 1; k++) {
01539                         real lambda = max(var_min, eigenvals[k]);
01540                         PLASSERT( lambda > 0 );
01541                         Vec eigen_k = eigenvecs(k);
01542                         real dot_k = dot(eigen_k, y_centered);
01543                         log_likelihood -= 0.5 * square(dot_k) / lambda;
01544                         multiplyAcc(y_centered, eigen_k, -dot_k);
01545                     }
01546                     log_likelihood -=
01547                         0.5 * pownorm(y_centered) * one_over_lambda0;
01548                     y_centered << y_centered_copy; // Restore original vector.
01549 
01550 #if 0
01551                     // Old code that had stability issues when dealing with
01552                     // large numbers.
01553 
01554                     // log_likelihood -= 0.5  * 1/lambda_0 * ||y - mu||^2
01555                     log_likelihood -=
01556                         0.5 * one_over_lambda0 * squared_norm_y_centered;
01557 
01558                     for (int k = 0; k < n_eig - 1; k++) {
01559                         // log_likelihood -= 0.5 * (1/lambda_k - 1/lambda_0)
01560                         //                       * ((y - mu)'.v_k)^2
01561                         real lambda = max(var_min, eigenvals[k]);
01562                         PLASSERT( lambda > 0 );
01563                         if (lambda > lambda0)
01564                             log_likelihood -=
01565                                 0.5 * (1.0 / lambda - one_over_lambda0)
01566                                     * square(dot(eigenvecs(k), y_centered));
01567                     }
01568 #endif
01569 
01570                     // Release pointer to 'eigenvecs_missing'.
01571                     eigenvecs = dummy_mat;
01572                     eigenvecs_missing = &eigenvecs_missing_storage;
01573 
01574                     if (impute_missing && current_training_sample >= 0) {
01575                         // We need to store the conditional expectation of the
01576                         // sample missing values.
01577                         // For this we compute H3^-1, since this expectation is
01578                         // equal to mu_y - H3^-1 H2 (x - mu_x).
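                        // More precisely, H3 and H2 are blocks of the inverse
                        // joint covariance (precision) matrix of y, partitioned
                        // into missing / observed coordinates:
                        // H3 = Lambda_{miss,miss} and H2 = Lambda_{miss,obs}.
                        // The standard conditional Gaussian identity then gives
                        //   E[y_miss | y_obs] = mu_miss - H3^-1 H2 (y_obs - mu_obs),
                        // which is what is computed below.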
01579                         static Mat H3;
01580                         static Mat H2;
01581                         Mat& H3_inv = H3_inverse[j];
01582                         int n_missing = coord_missing.length();
01583                         if (!eff_naive_missing ||
01584                                 need_recompute[current_training_sample]) {
01585                         H3.setMod(n_missing);
01586                         H3.resize(n_missing, n_missing);
01587                         H3_inv.resize(n_missing, n_missing);
01588                         for (int i = 0; i < n_missing; i++)
01589                             for (int k = 0; k < n_missing; k++)
01590                                 H3(i,k) = (*inv_cov_y)(coord_missing[i],
01591                                         coord_missing[k]);
01592                         PLASSERT( H3.isSymmetric(true, true) );
01593                         matInvert(H3, H3_inv);
01594                         // PLASSERT( H3_inv.isSymmetric(false, true) );
01595                         fillItSymmetric(H3_inv);
01596                         }
01597 
01598                         H2.resize(n_missing, n_non_missing);
01599                         for (int i = 0; i < n_missing; i++)
01600                             for (int k = 0; k < n_non_missing; k++)
01601                                 H2(i,k) = (*inv_cov_y)(coord_missing[i],
01602                                                        non_missing[k]);
01603                         static Vec H2_y_centered;
01604                         H2_y_centered.resize(n_missing);
01605                         product(H2_y_centered, H2, y_centered);
01606                         static Vec cond_mean;
01607                         cond_mean.resize(n_missing);
01608                         product(cond_mean, H3_inv, H2_y_centered);
01609                         static Vec full_vec;
01610                         // TODO Right now, we store the full data vector. It
01611                         // may be more efficient to only store the missing
01612                         // values.
01613                         full_vec.resize(D);
01614                         full_vec << y;
01615                         for (int i = 0; i < n_missing; i++)
01616                             full_vec[coord_missing[i]] =
01617                                 center_j[coord_missing[i]] - cond_mean[i];
01618                         PLASSERT( !full_vec.hasMissing() );
01619                         imputed_missing[j]->putRow(current_training_sample,
01620                                                    full_vec);
01621                     }
01622 
01623                     }
01624             //}
01625             } else {
01626                 log_likelihood = log_coeff[j];
01627 
01628                 mu_y = center(j).subVec(0, n_predicted);
01629                 eigenvals = eigenvalues(j);
01630                 eigenvecs = eigenvectors[j];
01631 
01632                 y_centered.resize(n_predicted);
01633                 y_centered << y;
01634                 y_centered -= mu_y;
01635                 real squared_norm_y_centered = pownorm(y_centered);
01636                 real var_min = square(sigma_min);
01637                 int n_eig = n_eigen_computed;
01638                 real lambda0 = max(var_min, eigenvals[n_eig - 1]);
01639                 PLASSERT( lambda0 > 0 );
01640 
01641                 real one_over_lambda0 = 1.0 / lambda0;
01642                 // log_likelihood -= 0.5  * 1/lambda_0 * ||y - mu||^2
01643                 log_likelihood -= 0.5 * one_over_lambda0 * squared_norm_y_centered;
01644 
01645                 for (int k = 0; k < n_eig - 1; k++) {
01646                     // log_likelihood -= 0.5 * (1/lambda_k - 1/lambda_0)
01647                     //                       * ((y - mu)'.v_k)^2
01648                     real lambda = max(var_min, eigenvals[k]);
01649                     PLASSERT( lambda > 0 );
01650                     if (lambda > lambda0)
01651                         log_likelihood -= 0.5 * (1.0 / lambda - one_over_lambda0)
01652                             * square(dot(eigenvecs(k), y_centered));
01653                 }
01654             }
01655         } else {
01656             if (y.hasMissing()) {
01657                 // TODO Code duplication is ugly!
01658                 if (is_predictor) {
01659                     non_missing.resize(0);
01660                     for (int k = 0; k < y.length(); k++)
01661                         if (!is_missing(y[k]))
01662                             non_missing.append(k);
01663                     int n_non_missing = non_missing.length();
01664                     int n_predicted_ext = n_predicted + (n_predictor - n_non_missing);
01665 
01666                     work_mat1.resize(n_predicted_ext, n_non_missing);
01667                     work_mat2.resize(n_predicted_ext, n_predicted_ext);
01668                     real var_min = square(sigma_min);
01669                     eigenvalues_x_miss.resize(L, n_non_missing);
01670                     eigenvectors_x_miss.resize(L);
01671                     // Compute the mean and covariance of x and y|x for the j-th
01672                     // Gaussian (we will need them to compute the likelihood).
01673                     // TODO Do we really compute the mean of y|x here?
01674                     // TODO This is pretty ugly but it seems to work: replace by
01675                     // better-looking code.
01676 
01677                     // First we compute the joint covariance matrix from the
01678                     // eigenvectors and eigenvalues:
01679                     // full_cov = sum_k (lambda_k - lambda0) v_k v_k' + lambda0.I
01680 
01681                     PLASSERT( n_predictor + n_predicted == D );
01682 
01683                     Mat& full_cov_j = full_cov;
01684                     full_cov_j.resize(D, D);
01685                     eigenvals = eigenvalues(j);
01686                     real lambda0 = max(var_min, eigenvals[n_eigen_computed - 1]);
01687 
01688                     full_cov_j.fill(0);
01689                     Mat& eigenvectors_j = eigenvectors[j];
01690                     PLASSERT( eigenvectors_j.width() == D );
01691 
01692                     for (int k = 0; k < n_eigen_computed - 1; k++)
01693                         externalProductScaleAcc(
01694                                 full_cov_j, eigenvectors_j(k),
01695                                 eigenvectors_j(k),
01696                                 max(var_min, eigenvals[k]) - lambda0);
01697 
01698                     for (int i = 0; i < D; i++)
01699                         full_cov_j(i,i) += lambda0;
01700 
01701                     // By construction, the resulting matrix is symmetric.
01702                     // However, numerical round-off may make it slightly
01703                     // asymmetric, so we explicitly symmetrize it.
01704                     PLASSERT( full_cov_j.isSymmetric(false) );
01705                     fillItSymmetric(full_cov_j);
01706 
01707                     // Extract the covariance of the predictor x.
01708                     Mat cov_x_j_miss = full_cov.subMat(0, 0, n_predictor, n_predictor);
01709                     cov_x_j.resize(n_non_missing, n_non_missing);
01710                     for (int k = 0; k < n_non_missing; k++)
01711                         for (int p = k; p < n_non_missing; p++)
01712                             cov_x_j(k,p) = cov_x_j(p,k) =
01713                                 cov_x_j_miss(non_missing[k], non_missing[p]);
01714 
01715                     // Compute its SVD.
01716                     eigenvectors_x_miss[j].resize(n_non_missing, n_non_missing);
01717                     eigenvals = eigenvalues_x_miss(j);
01718                     eigenVecOfSymmMat(cov_x_j, n_non_missing, eigenvals,
01719                                       eigenvectors_x_miss[j]);
01720 
01721                     y_non_missing.resize(n_non_missing);
01722                     center_non_missing.resize(n_non_missing);
01723                     for (int k = 0; k < n_non_missing; k++) {
01724                         center_non_missing[k] = center(j, non_missing[k]);
01725                         y_non_missing[k] = y[non_missing[k]];
01726                     }
01727 
01728 
01729                     log_likelihood =
01730                         precomputeGaussianLogCoefficient(eigenvals, n_non_missing);
01731                     eigenvecs = eigenvectors_x_miss[j];
01732                     y_centered.resize(n_non_missing);
01733                     y_centered << y_non_missing;
01734                     mu = center_non_missing;
01735 
01736                 } else {
01737                     // We need to redo the whole computation for this case.
01738                     // First the full covariance (of y|x).
01739                     Mat& cov_y = cov_y_x;
01740                     real var_min = square(sigma_min);
01741                     cov_y.resize(n_predicted, n_predicted);
01742                     eigenvals = eigenvalues_y_x(j);
01743                     real lambda0 = max(var_min, eigenvals.lastElement());
01744                     cov_y.fill(0);
01745                     Mat& eigenvectors_j = eigenvectors_y_x[j];
01746                     int n_eig = eigenvectors_j.length();
01747 
01748                     PLASSERT( eigenvectors_j.width() == n_predicted );
01749 
01750                     for (int k = 0; k < n_eig - 1; k++)
01751                         externalProductScaleAcc(
01752                                 cov_y, eigenvectors_j(k), eigenvectors_j(k),
01753                                 max(var_min, eigenvals[k]) - lambda0);
01754 
01755                     for (int i = 0; i < n_predicted; i++)
01756                         cov_y(i,i) += lambda0;
01757 
01758                     // By construction, the resulting matrix is symmetric.
01759                     // However, numerical round-off may make it slightly
01760                     // asymmetric, so we explicitly symmetrize it.
01761                     PLASSERT( cov_y.isSymmetric(false) );
01762                     fillItSymmetric(cov_y);
01763                     // Then extract what we want.
01764                     non_missing.resize(0);
01765                     for (int k = 0; k < n_predicted; k++)
01766                         if (!is_missing(y[k]))
01767                             non_missing.append(k);
01768                     mu_y = center_y_x(j);
01769                     int n_non_missing = non_missing.length();
01770                     the_mu_y_missing->resize(n_non_missing);
01771                     y_missing.resize(n_non_missing);
01772                     the_cov_y_missing->resize(n_non_missing, n_non_missing);
01773                     for (int k = 0; k < n_non_missing; k++) {
01774                         (*the_mu_y_missing)[k] = mu_y[non_missing[k]];
01775                         y_missing[k] = y[non_missing[k]];
01776                         for (int q = 0; q < n_non_missing; q++) {
01777                             (*the_cov_y_missing)(k,q) =
01778                                 cov_y(non_missing[k], non_missing[q]);
01779                         }
01780                     }
01781                     if (n_non_missing == 0) {
01782                         log_likelihood = 0;
01783                     } else {
01784                         // Perform SVD of cov_y_missing.
01785                         eigenVecOfSymmMat(*the_cov_y_missing, n_non_missing,
01786                                 eigenvals_missing, *eigenvecs_missing);
01787 
01788                         mu_y = *the_mu_y_missing;
01789                         eigenvals = eigenvals_missing;
01790                         eigenvecs = *eigenvecs_missing;
01791 
01792                         y_centered.resize(n_non_missing);
01793                         y_centered << y_missing;
01794                         y_centered -= mu_y;
01795                         real squared_norm_y_centered = pownorm(y_centered);
01796                         int n_eigen = n_non_missing;
01797 
01798                         lambda0 = max(var_min, eigenvals.lastElement());
01799                         PLASSERT( lambda0 > 0 );
01800                         real one_over_lambda0 = 1.0 / lambda0;
01801 
01802                         log_likelihood = precomputeGaussianLogCoefficient(
01803                                 eigenvals, n_non_missing);
01804                         // log_likelihood -= 0.5  * 1/lambda_0 * ||y - mu||^2
01805                         log_likelihood -=
01806                             0.5 * one_over_lambda0 * squared_norm_y_centered;
01807 
01808                         for (int k = 0; k < n_eigen - 1; k++) {
01809                             // log_likelihood -= 0.5 * (1/lambda_k - 1/lambda_0)
01810                             //                       * ((y - mu)'.v_k)^2
01811                             real lambda = max(var_min, eigenvals[k]);
01812                             PLASSERT( lambda > 0 );
01813                             if (lambda > lambda0)
01814                                 log_likelihood -=
01815                                     0.5 * (1.0 / lambda - one_over_lambda0)
01816                                     * square(dot(eigenvecs(k), y_centered));
01817                         }
01818                         // Allow future resize of 'eigenvecs_missing'.
01819                         eigenvecs = dummy_mat;
01820                     }
01821 
01822                     //Profiler::end("computeLogLikelihood");
01823                     return log_likelihood;
01824                 }
01825 
01826                 if (y_centered.length() > 0) {
01827                     y_centered -= mu;
01828 
01829                     real squared_norm_y_centered = pownorm(y_centered);
01830                     real var_min = square(sigma_min);
01831                     int n_eig = eigenvals.length();
01832 
01833                     real lambda0 = max(var_min, eigenvals.lastElement());
01834                     PLASSERT( lambda0 > 0 );
01835 
01836                     real one_over_lambda0 = 1.0 / lambda0;
01837                     // log_likelihood -= 0.5  * 1/lambda_0 * ||y - mu||^2
01838                     log_likelihood -= 0.5 * one_over_lambda0 * squared_norm_y_centered;
01839 
01840                     for (int k = 0; k < n_eig - 1; k++) {
01841                         // log_likelihood -= 0.5 * (1/lambda_k - 1/lambda_0)
01842                         //                       * ((y - mu)'.v_k)^2
01843                         real lambda = max(var_min, eigenvals[k]);
01844                         PLASSERT( lambda > 0 );
01845                         PLASSERT( lambda >= lambda0 );
01846                         if (lambda > lambda0)
01847                             log_likelihood -= 0.5 * (1.0 / lambda - one_over_lambda0)
01848                                 * square(dot(eigenvecs(k), y_centered));
01849                     }
01850                 }
01851             } else {
01852 
01853                 if (is_predictor) {
01854                     log_likelihood = log_coeff_x[j];
01855                     mu = center(j).subVec(0, n_predictor);
01856                     eigenvals = eigenvalues_x(j);
01857                     eigenvecs = eigenvectors_x[j];
01858                     y_centered.resize(n_predictor);
01859                 } else {
01860                     log_likelihood = log_coeff_y_x[j];
01861                     mu = center_y_x(j);
01862                     eigenvals = eigenvalues_y_x(j);
01863                     eigenvecs = eigenvectors_y_x[j];
01864                     y_centered.resize(n_predicted);
01865                 }
01866 
01867                 y_centered << y;
01868                 y_centered -= mu;
01869 
01870                 real squared_norm_y_centered = pownorm(y_centered);
01871                 real var_min = square(sigma_min);
01872                 int n_eig = eigenvals.length();
01873 
01874                 real lambda0 = max(var_min, eigenvals[n_eig - 1]);
01875                 PLASSERT( lambda0 > 0 );
01876 
01877                 real one_over_lambda0 = 1.0 / lambda0;
01878                 // log_likelihood -= 0.5  * 1/lambda_0 * ||y - mu||^2
01879                 log_likelihood -= 0.5 * one_over_lambda0 * squared_norm_y_centered;
01880 
01881                 for (int k = 0; k < n_eig - 1; k++) {
01882                     // log_likelihood -= 0.5 * (1/lambda_k - 1/lambda_0)
01883                     //                       * ((y - mu)'.v_k)^2
01884                     real lambda = max(var_min, eigenvals[k]);
01885                     PLASSERT( lambda > 0 );
01886                     PLASSERT( lambda >= lambda0 );
01887                     if (lambda > lambda0)
01888                         log_likelihood -= 0.5 * (1.0 / lambda - one_over_lambda0)
01889                             * square(dot(eigenvecs(k), y_centered));
01890                 }
01891             }
01892 
01893             // Free a potential reference to 'eigenvalues_x_miss' and
01894             // 'eigenvectors_x_miss'.
01895             eigenvals = dummy_vec;
01896             eigenvecs = dummy_mat;
01897         }
01898     }
01899     PLASSERT( !isnan(log_likelihood) );
01900     //Profiler::end("computeLogLikelihood");
01901     return log_likelihood;
01902 }
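
// The log-likelihood computations above all rely on the same low-rank
// parameterization of a Gaussian covariance,
//     Sigma = sum_k (lambda_k - lambda0) v_k v_k' + lambda0.I,
// where only the leading eigenvectors v_k are kept and lambda0 absorbs the
// remaining variance. The disabled sketch below is illustrative only: the
// helper and its name are not part of GaussMix, it is never called, and it
// merely restates the recurring pattern using the same helper functions as
// above.
#if 0
static real lowRankGaussianLogDensity(const Vec& y, const Vec& mu,
                                      const Vec& eigenvals, const Mat& eigenvecs,
                                      real var_min, real log_coefficient)
{
    // 'log_coefficient' is assumed to hold the normalization term
    // -d/2 log(2 pi) - 1/2 log|Sigma| (cf. precomputeGaussianLogCoefficient).
    int n_eig = eigenvals.length();     // Assumes at least one eigenvalue.
    real lambda0 = max(var_min, eigenvals[n_eig - 1]);
    Vec centered(y.length());
    centered << y;
    centered -= mu;
    real log_density = log_coefficient;
    for (int k = 0; k < n_eig - 1; k++) {
        real lambda = max(var_min, eigenvals[k]);
        real dot_k = dot(eigenvecs(k), centered);
        // Contribution of the k-th eigen-direction, which is then deflated.
        log_density -= 0.5 * square(dot_k) / lambda;
        multiplyAcc(centered, eigenvecs(k), -dot_k);
    }
    // Remaining directions all have variance lambda0.
    log_density -= 0.5 * pownorm(centered) / lambda0;
    return log_density;
}
#endif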
01903 
01905 // computeAllLogLikelihoods //
01907 void GaussMix::computeAllLogLikelihoods(const Vec& sample, const Vec& log_like)
01908 {
01909     PLASSERT( sample.length()   == D );
01910     PLASSERT( log_like.length() == L );
01911     for (int j = 0; j < L; j++)
01912         log_like[j] = computeLogLikelihood(sample, j);
01913 }
01914 
01916 // computePosteriors //
01918 void GaussMix::computePosteriors() {
01919     //Profiler::start("computePosteriors");
01920     sample_row.resize(D);
01921     if (impute_missing) {
01922         sum_of_posteriors.resize(L); // TODO Do that in resize method.
01923         sum_of_posteriors.fill(0);
01924     }
01925     log_likelihood_post.resize(L);
01926     if (impute_missing)
01927         // Clear the additional 'error_covariance' matrix.
01928         for (int j = 0; j < L; j++)
01929             error_covariance[j].fill(0);
01930     if (efficient_missing == 1 || efficient_missing == 3) {
01931         // Loop over all clusters.
01932         for (int k = 0; k < missing_template.length(); k++) {
01933             const TVec<int>& samples_clust = spanning_path[k];
01934             int n_samp = samples_clust.length();
01935             log_likelihood_post_clust.resize(n_samp, L);
01936             current_cluster = k;
01937             if (impute_missing)
01938                 for (int j = 0; j < L; j++)
01939                     clust_imputed_missing[j].resize(n_samp, D);
01940             for (int j = 0; j < L; j++) {
01941                 // For each Gaussian, go through all samples in the cluster.
01942                 previous_training_sample = -1;
01943                 for (int i = 0; i < samples_clust.length(); i++) {
01944                     int s = samples_clust[i];
01945                     current_training_sample = s;
01946                     train_set->getSubRow(s, 0, sample_row);
01947                     log_likelihood_post_clust(i, j) =
01948                         computeLogLikelihood(sample_row, j) + pl_log(alpha[j]);
01949                     previous_training_sample = current_training_sample;
01950                     current_training_sample = -1;
01951                 }
01952             }
01953             previous_training_sample = -2;
01954             // Get the posteriors for all samples in the cluster.
01955             for (int i = 0; i < samples_clust.length(); i++) {
01956                 real log_sum_likelihood = logadd(log_likelihood_post_clust(i));
01957                 int s = samples_clust[i];
01958                 for (int j = 0; j < L; j++) {
01959                     real post = exp(log_likelihood_post_clust(i, j) -
01960                                     log_sum_likelihood);
01961                     posteriors(s, j) = post;
01962                     if (impute_missing)
01963                         sum_of_posteriors[j] += post;
01964                 }
01965             }
01966             if (!impute_missing)
01967                 continue;
01968             // We should now be ready to impute missing values.
01969             for (int i = 0; i < samples_clust.length(); i++) {
01970                 int s = samples_clust[i];
01971                 for (int j = 0; j < L; j++) {
01972                     PLASSERT( !clust_imputed_missing[j](i).hasMissing() );
01973                     // TODO We are most likely wasting memory here.
01974                     imputed_missing[j]->putRow(s, clust_imputed_missing[j](i));
01975                 }
01976             }
01977 
01978             // If the 'impute_missing' method is used, we now need to compute
01979             // the extra contribution to the covariance matrix.
01980             for (int j = 0; j < L; j++) {
01981                 // For each Gaussian, go through all samples in the cluster.
01982                 previous_training_sample = -1;
01983                 for (int i = 0; i < samples_clust.length(); i++) {
01984                     int s = samples_clust[i];
01985                     current_training_sample = s;
01986                     train_set->getSubRow(s, 0, sample_row);
01987                     addToCovariance(sample_row, j, error_covariance[j],
01988                             posteriors(s, j));
01989                     previous_training_sample = current_training_sample;
01990                     current_training_sample = -1;
01991                 }
01992             }
01993             previous_training_sample = -2;
01994         }
01995     } else {
01996         previous_training_sample = -1;
01997     for (int i = 0; i < nsamples; i++) {
01998         train_set->getSubRow(i, 0, sample_row);
01999         // First we need to compute the likelihood P(s_i | j).
02000         current_training_sample = i;
02001         computeAllLogLikelihoods(sample_row, log_likelihood_post);
02002         PLASSERT( !log_likelihood_post.hasMissing() );
02003         for (int j = 0; j < L; j++)
02004             log_likelihood_post[j] += pl_log(alpha[j]);
02005         real log_sum_likelihood = logadd(log_likelihood_post);
02006         for (int j = 0; j < L; j++) {
02007             // Compute the posterior
02008             // P(j | s_i) = P(s_i | j) * alpha_j / (sum_k P(s_i | k) * alpha_k).
02009             real post = exp(log_likelihood_post[j] - log_sum_likelihood);
02010             posteriors(i,j) = post;
02011             if (impute_missing)
02012                 sum_of_posteriors[j] += post;
02013         }
02014         // Add contribution to the covariance matrix if needed.
02015         if (impute_missing) {
02016             for (int j = 0; j < L; j++) {
02017                 real post = posteriors(i,j);
02018                 int k_count = 0;
02019                 for (int k = 0; k < sample_row.length(); k++)
02020                     if (is_missing(sample_row[k])) {
02021                         int l_count = 0;
02022                         for (int l = 0; l < sample_row.length(); l++)
02023                             if (is_missing(sample_row[l])) {
02024                                 error_covariance[j](k, l) +=
02025                                     post * H3_inverse[j](k_count, l_count);
02026                                 l_count++;
02027                             }
02028                         k_count++;
02029                     }
02030             }
02031             int dummy_test = 0;
02032             dummy_test++;
02033         }
02034         previous_training_sample = current_training_sample;
02035         current_training_sample = -1;
02036     }
02037     previous_training_sample = -2;
02038     }
02039     //Profiler::end("computePosteriors");
02040 }
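
// Summary of the E-step performed above: for each training sample s_i and each
// component j, the joint term log P(s_i | j) + log(alpha_j) is computed, and
// the posterior is obtained in a numerically stable way through log-sum-exp:
//     P(j | s_i) = exp( [log P(s_i | j) + log alpha_j]
//                       - logadd_k [log P(s_i | k) + log alpha_k] ).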
02041 
02043 // computeMixtureWeights //
02045 bool GaussMix::computeMixtureWeights(bool allow_replace) {
02046     bool replaced_gaussian = false;
02047     if (L==1)
02048         alpha[0] = 1;
02049     else {
02050         alpha.fill(0);
02051         for (int i = 0; i < nsamples; i++)
02052             for (int j = 0; j < L; j++)
02053                 alpha[j] += posteriors(i,j);
02054         alpha /= real(nsamples);
02055         for (int j = 0; j < L && !replaced_gaussian; j++)
02056             if (alpha[j] < alpha_min && allow_replace
02057                                      && stage_replaced[j] != this->stage) {
02058                 // alpha[j] is too small! We need to remove this Gaussian from
02059                 // the mixture, and find a new (better) one.
02060                 replaceGaussian(j);
02061                 replaced_gaussian = true;
02062                 stage_replaced[j] = this->stage;
02063             }
02064     }
02065     return replaced_gaussian;
02066 }
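
// Note: the update above is the usual M-step for the mixing weights,
//     alpha_j = (1 / nsamples) * sum_i P(j | s_i),
// with the additional heuristic that a component whose weight falls below
// 'alpha_min' may be replaced by a new Gaussian (at most one replacement per
// call, and a given component is not replaced twice at the same stage).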
02067 
02069 // expectation //
02071 void GaussMix::expectation(Vec& mu) const
02072 {
02073     mu.resize(n_predicted);
02074     if (type_id == TYPE_SPHERICAL || type_id == TYPE_DIAGONAL ||
02075        (type_id == TYPE_GENERAL && n_predictor == 0)) {
02076         // The expectation is the same in the 'spherical' and 'diagonal' cases.
02077         mu.fill(0);
02078         real* coeff = n_predictor == 0 ? alpha.data() : p_j_x.data();
02079         for (int j = 0; j < L; j++)
02080             mu += center(j).subVec(n_predictor, n_predicted) * coeff[j];
02081     } else {
02082         PLASSERT( type_id == TYPE_GENERAL );
02083         // The case 'n_predictor == 0' is considered above.
02084         PLASSERT( n_predictor > 0 );
02085         mu.fill(0);
02086         for (int j = 0; j < L; j++)
02087             mu += center_y_x(j) * p_j_x[j];
02088     }
02089 }
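
// Note: in both branches above the expectation of the mixture is the weighted
// sum of the component (conditional) means,
//     E[y | x] = sum_j w_j * E_j[y | x],
// where the weights w_j are the mixing proportions 'alpha' when there is no
// predictor part and the conditional posteriors 'p_j_x' otherwise.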
02090 
02092 // missingExpectation //
02094 void GaussMix::missingExpectation(const Vec& input, Vec& mu)
02095 {
02096     static TVec<int> coord_missing;
02097     static TVec<int> coord_non_missing;
02098     static TVec<int> coord_reordered;
02099     static Vec input_non_missing;
02100     static Mat center_backup;
02101     static Mat mat_storage;
02102     static TVec<Mat> eigenvectors_backup;
02103     if (!input.hasMissing()) {
02104         mu.resize(0);
02105         return;
02106     }
02107     if (type_id != TYPE_GENERAL)
02108         PLERROR("In GaussMix::missingExpectation - Not implemented for this "
02109                 "type");
02110 
02111     // Create coordinate indices lists.
02112     coord_missing.resize(0);
02113     coord_non_missing.resize(0);
02114     input_non_missing.resize(0);
02115     for (int i = 0; i < input.length(); i++)
02116         if (is_missing(input[i]))
02117             coord_missing.append(i);
02118         else {
02119             coord_non_missing.append(i);
02120             input_non_missing.append(input[i]);
02121         }
02122     int n_missing = coord_missing.length();
02123     int n_non_missing = coord_non_missing.length();
02124     coord_reordered.resize(input.length());
02125     coord_reordered.subVec(0, n_non_missing) << coord_non_missing;
02126     coord_reordered.subVec(n_non_missing, n_missing) << coord_missing;
02127 
02128     // Backup existing data.
02129     center_backup.resize(center.length(), center.width());
02130     center_backup << center;
02131     eigenvectors_backup.resize(eigenvectors.length());
02132     for (int i = 0; i < eigenvectors.length(); i++) {
02133         Mat& eigenvecs_backup = eigenvectors_backup[i];
02134         Mat& eigenvecs = eigenvectors[i];
02135         eigenvecs_backup.resize(eigenvecs.length(), eigenvecs.width());
02136         eigenvecs_backup << eigenvecs;
02137     }
02138     int predictor_size_backup = predictor_size;
02139     int predicted_size_backup = predicted_size;
02140 
02141     // Update components to match the new reordered coordinates.
02142     selectColumns(center_backup, coord_reordered, center);
02143     for (int i = 0; i < eigenvectors.length(); i++)
02144         selectColumns(eigenvectors_backup[i], coord_reordered,
02145                                               eigenvectors[i]);
02146 
02147     // Set this distribution as conditional to compute the expectation of the
02148     // missing (predicted) part given the observed (predictor) part.
02149     setPredictorPredictedSizes(n_non_missing, n_missing);
02150     setPredictor(input_non_missing);
02151 
02152     // Compute the expectation.
02153     expectation(mu);
02154 
02155     // Restore everything.
02156     setPredictorPredictedSizes(predictor_size_backup, predicted_size_backup);
02157     center << center_backup;
02158     for (int i = 0; i < eigenvectors.length(); i++) {
02159         Mat& eigenvecs_backup = eigenvectors_backup[i];
02160         Mat& eigenvecs = eigenvectors[i];
02161         eigenvecs << eigenvecs_backup;
02162     }
02163 }
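
// Note: missingExpectation() works by temporarily reordering the coordinates
// so that the observed entries of 'input' play the role of the predictor and
// the missing entries the role of the predicted part; the conditional
// expectation is then obtained through setPredictor() / expectation(), after
// which the original sizes, centers and eigenvectors are restored.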
02164 
02166 // forget //
02168 void GaussMix::forget()
02169 {
02170     inherited::forget();
02171     log_p_j_x.resize(0);
02172     p_j_x.resize(0);
02173     D = -1;
02174     n_eigen_computed = -1;
02175     ptimer->resetAllTimers();
02176     stage_replaced.fill(-1);
02177     /*
02178        if (training_time >= 0)
02179        training_time = 0;
02180        if (conditional_updating_time >= 0)
02181        conditional_updating_time = 0;
02182        n_tries.resize(0);
02183     */
02184 }
02185 
02187 // generate //
02189 void GaussMix::generate(Vec& x) const
02190 {
02191     generateFromGaussian(x, -1);
02192 }
02193 
02195 // generateFromGaussian //
02197 void GaussMix::generateFromGaussian(Vec& sample, int given_gaussian) const {
02198     // TODO Why not have p_j_x point to alpha when n_predictor == 0? This may
02199     // make the code cleaner (but check what happens with serialization...).
02200     int j;    // The index of the Gaussian to use.
02201 
02202     // The assert below may fail if one forgets to provide a predictor part
02203     // through the 'setPredictor' method.
02204     PLASSERT( n_predictor == 0 || p_j_x.length() == L );
02205 
02206     if (given_gaussian < 0)
02207         j = random_gen->multinomial_sample(n_predictor == 0 ? alpha : p_j_x);
02208     else
02209         j = given_gaussian % alpha.length();
02210 
02211     sample.resize(n_predicted);
02212 
02213     if (type_id == TYPE_SPHERICAL || type_id == TYPE_DIAGONAL) {
02214         Vec mu_y = center(j).subVec(n_predictor, n_predicted);
02215         for (int k = 0; k < n_predicted; k++) {
02216             real stddev = type_id == TYPE_SPHERICAL ? sigma[j]
02217                                                     : diags(j, k + n_predictor);
02218             stddev = max(sigma_min, stddev);
02219             sample[k] = random_gen->gaussian_mu_sigma(mu_y[k], stddev);
02220         }
02221     } else {
02222         PLASSERT( type_id == TYPE_GENERAL );
02223         static Vec norm_vec;
02224         if (n_predictor == 0) {
02225             // Simple case.
02226             PLASSERT( eigenvectors[j].width() == n_predicted );
02227             PLASSERT( center(j).length() == n_predicted );
02228 
02229             Vec eigenvals = eigenvalues(j);
02230             Mat eigenvecs = eigenvectors[j].subMat(0, 0, n_eigen_computed,
02231                                                          n_predicted);
02232             int n_eig = n_eigen_computed;
02233             Vec mu_y = center(j);
02234 
02235             norm_vec.resize(n_eig - 1);
02236             random_gen->fill_random_normal(norm_vec);
02237             real var_min = square(sigma_min);
02238             real lambda0 = max(var_min, eigenvals[n_eig - 1]);
02239             sample.fill(0);
02240             for (int k = 0; k < n_eig - 1; k++)
02241                 // TODO See if can use more optimized function.
02242                 sample += sqrt(max(var_min, eigenvals[k]) - lambda0)
02243                           * norm_vec[k] * eigenvecs(k);
02244             norm_vec.resize(n_predicted);
02245             random_gen->fill_random_normal(norm_vec);
02246             sample += norm_vec * sqrt(lambda0);
02247             sample += mu_y;
02248         } else {
02249             // TODO Get rid of code duplication with above.
02250 
02251             Vec eigenvals = eigenvalues_y_x(j);
02252             Mat eigenvecs = eigenvectors_y_x[j];
02253 
02254             int n_eig = n_predicted;
02255             Vec mu_y = center_y_x(j);
02256 
02257             norm_vec.resize(n_eig - 1);
02258             random_gen->fill_random_normal(norm_vec);
02259             real var_min = square(sigma_min);
02260             real lambda0 = max(var_min, eigenvals[n_eig - 1]);
02261             sample.fill(0);
02262             for (int k = 0; k < n_eig - 1; k++)
02263                 // TODO See if can use more optimized function.
02264                 sample += sqrt(max(var_min, eigenvals[k]) - lambda0)
02265                           * norm_vec[k] * eigenvecs(k);
02266             norm_vec.resize(n_predicted);
02267             random_gen->fill_random_normal(norm_vec);
02268             sample += norm_vec * sqrt(lambda0);
02269             sample += mu_y;
02270         }
02271     }
02272     PLASSERT( !sample.hasMissing() );
02273 }
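
// The general-type sampling above uses the same low-rank parameterization as
// the likelihood computations: a sample is obtained as
//     y = mu + sum_k sqrt(max(lambda_k, var_min) - lambda0) * z_k * v_k
//           + sqrt(lambda0) * epsilon,
// with z_k and the entries of epsilon drawn i.i.d. from N(0, 1). The disabled
// sketch below is illustrative only: the helper and its name are not part of
// GaussMix, it is never called, and 'rg' stands for any PRandom generator.
#if 0
static void sampleLowRankGaussian(const Vec& mu, const Vec& eigenvals,
                                  const Mat& eigenvecs, real var_min,
                                  PP<PRandom> rg, Vec& sample)
{
    int d = mu.length();
    int n_eig = eigenvals.length();     // Assumes at least one eigenvalue.
    real lambda0 = max(var_min, eigenvals[n_eig - 1]);
    Vec z(n_eig - 1);
    rg->fill_random_normal(z);
    sample.resize(d);
    sample.fill(0);
    for (int k = 0; k < n_eig - 1; k++)
        // Contribution of the k-th leading eigen-direction.
        sample += sqrt(max(var_min, eigenvals[k]) - lambda0) * z[k] * eigenvecs(k);
    Vec eps(d);
    rg->fill_random_normal(eps);
    // Isotropic noise accounting for the residual variance lambda0.
    sample += eps * sqrt(lambda0);
    sample += mu;
}
#endif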
02274 
02275 /*
02277 // getNEigenComputed //
02279 int GaussMix::getNEigenComputed() const {
02280     return n_eigen_computed;
02281 }
02282 
02284 // getEigenvectors //
02286 Mat GaussMix::getEigenvectors(int j) const {
02287     //return eigenvectors[j];
02288 }
02289 
02291 // getEigenvals //
02293 Vec GaussMix::getEigenvals(int j) const {
02294     //return eigenvalues(j);
02295 }
02296 */
02297 
02299 // kmeans //
02301 void GaussMix::kmeans(const VMat& samples, int nclust, TVec<int>& clust_idx,
02302                       Mat& clust, int maxit)
02303 // TODO Put it into the PLearner framework.
02304 {
02305     int vmat_length = samples.length();
02306     clust.resize(nclust,samples->inputsize());
02307     clust_idx.resize(vmat_length);
02308 
02309     Vec input(samples->inputsize());
02310     Vec target(samples->targetsize());
02311     real weight;
02312 
02313     TVec<int> old_clust_idx(vmat_length);
02314     bool ok=false;
02315 
02316     // Compute mean and standard deviation for all fields (will be used to
02317     // generate some random values to replace missing values).
02318     computeMeanAndStddev(samples, mean_training, stddev_training);
02319 
02320     if (mean_training.hasMissing())
02321         // Some features are completely missing: we assume mean is 0 and
02322         // standard deviation is 1.
02323         for (int i = 0; i < mean_training.length(); i++)
02324             if (is_missing(mean_training[i])) {
02325                 mean_training[i] = 0;
02326                 stddev_training[i] = 1;
02327             }
02328 
02329     if (stddev_training.hasMissing())
02330         // There may be only one sample with a non-missing value; in that case
02331         // we assume the standard deviation is 1 (probably not always a good
02332         // idea, but it should not really matter in any real-life application).
02333         for (int i = 0; i < stddev_training.length(); i++)
02334             if (is_missing(stddev_training[i]))
02335                 stddev_training[i] = 1;
02336 
02337     // Build an 'nclust'-long vector of sample indices to initialize cluster
02338     // centers. In order to avoid some local minima, try to span as much of the
02339     // space as possible by systematically choosing as initial cluster center
02340     // the point 'farthest' from current centers.
02341     TVec<int> start_idx(nclust, -1);
02342 
02343     // Store the distance from each point to the 'nclust' cluster centers.
02344     Mat distances(vmat_length, nclust);
02345     Vec min_distances(vmat_length);
02346     int farthest_sample = random_gen->uniform_multinomial_sample(vmat_length);
02347     if (!original_to_reordered.isEmpty())
02348         farthest_sample = original_to_reordered[farthest_sample];
02349     Vec input_k;
02350     for (int i=0; i<nclust; i++)
02351     {
02352         start_idx[i] = farthest_sample;
02353         samples->getExample(farthest_sample,input,target,weight);
02354         clust(i) << input;
02355         // Ensure there are no missing values in the initial centers.
02356         // To do so we generate random values based on 'mean_training' and
02357         // 'stddev_training' if the center we picked turns out to have missing values.
02358         Vec cl_center = clust(i);
02359         for (int k = 0; k < cl_center.length(); k++)
02360             if (is_missing(cl_center[k]))
02361                 cl_center[k] =
02362                     random_gen->gaussian_mu_sigma(mean_training[k],
02363                                                   stddev_training[k]);
02364         if (i < nclust - 1) {
02365             // Find next cluster center.
02366             for (int k = 0; k < vmat_length; k++) {
02367                 samples->getExample(k, input_k, target, weight);
02368                 real dist = 0;
02369                 int count = 0;
02370                 for (int j = 0; j < input_k.length(); j++)
02371                     if (!is_missing(input_k[j])) {
02372                         dist += fabs(input_k[j] - cl_center[j]);
02373                         count++;
02374                     }
02375                 if (count > 0)
02376                     dist /= real(count);
02377                 distances(k, i) = dist;
02378                 min_distances[k] = min(distances(k).subVec(0, i + 1));
02379             }
02380             farthest_sample = argmax(min_distances);
02381         }
02382     }
02383 
02384     PP<ProgressBar> pb;
02385     if (report_progress)
02386         pb = new ProgressBar("Performing K-Means to initialize centers", maxit);
02387     int iteration = maxit;
02388     TVec<VecStatsCollector> clust_stat(nclust);
02389     Vec clust_i;
02390     Vec nnonmissing(input.length());
02391     while(!ok && iteration--)
02392     {
02393         for (int i = 0; i < clust_stat.length(); i++)
02394             clust_stat[i].forget();
02395         old_clust_idx << clust_idx;
02396         for(int i=0;i<vmat_length;i++)
02397         {
02398             samples->getExample(i,input,target,weight);
02399             real dist,bestdist = REAL_MAX;
02400             int bestclust=0;
02401             if (nclust>1) for(int j=0;j<nclust;j++)
02402                 if((dist = powdistance(input, clust(j), 2.0, true)) < bestdist)
02403                 {
02404                     bestdist=dist;
02405                     bestclust=j;
02406                 }
02407             clust_idx[i] = bestclust;
02408             clust_stat[bestclust].update(input, weight);
02409         }
02410 
02411         for (int i = 0; i < nclust; i++) {
02412             clust_i = clust(i);
02413             int j;
02414             for (j = 0;
02415                  j < input.length()
02416                     && clust_stat[i].stats.length() > 0
02417                     && is_equal(clust_stat[i].getStats(j).nnonmissing(), 0);
02418                  j++) {}
02419             if (j < input.length())
02420                 // There have been some samples assigned to this cluster.
02421                 clust_stat[i].getMean(clust_i);
02422             else {
02423                 // Re-initialize randomly the cluster center.
02424                 int new_center =
02425                     random_gen->uniform_multinomial_sample(vmat_length);
02426                 if (!original_to_reordered.isEmpty())
02427                     new_center = original_to_reordered[new_center];
02428                 samples->getExample(new_center, input, target, weight);
02429                 clust_i << input;
02430             }
02431             // Replace missing values by randomly generated values.
02432             for (int k = 0; k < clust_i.length(); k++)
02433                 if (is_missing(clust_i[k]))
02434                     clust_i[k] =
02435                         random_gen->gaussian_mu_sigma(mean_training  [k],
02436                                                       stddev_training[k]);
02437         }
02438 
02439         ok=true;
02440 
02441         if (nclust>1)
02442             for(int i=0;i<vmat_length;i++)
02443                 if(old_clust_idx[i]!=clust_idx[i])
02444                 {
02445                     ok=false;
02446                     break;
02447                 }
02448         if (report_progress)
02449             pb->update(maxit - iteration + 1);
02450     }
02451     if (report_progress && verbosity >= 2 && iteration > 0)
02452         pout << "K-Means performed in only " << maxit - iteration << " iterations."
02453              << endl;
02454 }
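      // Summary of the K-Means variant above:
      //  - initialization: the first center is a uniformly drawn training sample,
      //    and each subsequent center is the sample maximizing the minimum mean
      //    absolute distance (over non-missing coordinates) to the centers already
      //    picked ("farthest point" heuristic);
      //  - assignment: each sample goes to the closest center under the squared
      //    Euclidean distance (the last argument to powdistance presumably making
      //    it skip missing coordinates);
      //  - update: each center becomes the mean of its assigned samples; clusters
      //    left without samples are re-seeded at a random training sample, and
      //    missing coordinates are imputed from
      //    N(mean_training[k], stddev_training[k]^2);
      //  - stopping: when no sample changes cluster, or after 'maxit' iterations.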
02455 
02457 // log_density //
02459 real GaussMix::log_density(const Vec& y) const
02460 {
02461     log_likelihood_dens.resize(L);
02462     // First we need to compute the likelihood
02463     //   p(y,j | x) = p(y | x,j) * p(j | x).
02464     for (int j = 0; j < L; j++) {
02465         real logp_j_x = n_predictor == 0 ? pl_log(alpha[j])
02466                                      : log_p_j_x[j];
02467         log_likelihood_dens[j] = computeLogLikelihood(y, j) + logp_j_x;
02468         PLASSERT( !isnan(log_likelihood_dens[j]) );
02469     }
02470     return logadd(log_likelihood_dens);
02471 }
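      // 'logadd' returns log(sum_j exp(log_likelihood_dens[j])) computed in a
      // numerically stable way, so the value above is
      //     log p(y | x) = log sum_j p(j | x) p(y | x, j)
      // without ever exponentiating the individual log-likelihoods.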
02472 
02474 // makeDeepCopyFromShallowCopy //
02476 void GaussMix::makeDeepCopyFromShallowCopy(CopiesMap& copies)
02477 {
02478     inherited::makeDeepCopyFromShallowCopy(copies);
02479 
02480     deepCopyField(log_likelihood_post,      copies);
02481     deepCopyField(sample_row,               copies);
02482     deepCopyField(H3_inverse,               copies);
02483     deepCopyField(ptimer,                   copies);
02484     deepCopyField(missing_patterns,         copies);
02485     deepCopyField(missing_template,         copies);
02486     deepCopyField(sample_to_path_index,     copies);
02487     deepCopyField(spanning_path,            copies);
02488     deepCopyField(spanning_use_previous,    copies);
02489     deepCopyField(spanning_can_free,        copies);
02490     deepCopyField(log_likelihood_post_clust,copies);
02491     deepCopyField(clusters_samp,            copies);
02492     deepCopyField(cholesky_queue,           copies);
02493     deepCopyField(log_det_queue,            copies);
02494     deepCopyField(imputed_missing,          copies);
02495     deepCopyField(clust_imputed_missing,    copies);
02496     deepCopyField(sum_of_posteriors,        copies);
02497     deepCopyField(no_missing_change,        copies);
02498     deepCopyField(cond_var_inv_queue,       copies);
02499     deepCopyField(indices_queue,            copies);
02500     deepCopyField(indices_inv_queue,        copies);
02501     deepCopyField(mean_training,            copies);
02502     deepCopyField(stddev_training,          copies);
02503     deepCopyField(error_covariance,         copies);
02504     deepCopyField(posteriors,               copies);
02505     deepCopyField(initial_weights,          copies);
02506     deepCopyField(updated_weights,          copies);
02507     deepCopyField(eigenvectors_x,           copies);
02508     deepCopyField(eigenvalues_x,            copies);
02509     deepCopyField(y_x_mat,                  copies);
02510     deepCopyField(eigenvectors_y_x,         copies);
02511     deepCopyField(eigenvalues_y_x,          copies);
02512     deepCopyField(center_y_x,               copies);
02513     deepCopyField(log_p_j_x,                copies);
02514     deepCopyField(p_j_x,                    copies);
02515     deepCopyField(log_coeff,                copies);
02516     deepCopyField(log_coeff_x,              copies);
02517     deepCopyField(log_coeff_y_x,            copies);
02518     deepCopyField(joint_cov,                copies);
02519     deepCopyField(joint_inv_cov,            copies);
02520     deepCopyField(chol_joint_cov,           copies);
02521     // deepCopyField(chol_cov_template,        copies);
02522     deepCopyField(stage_joint_cov_computed, copies);
02523     deepCopyField(stage_replaced,           copies);
02524     deepCopyField(sample_to_template,       copies);
02525     deepCopyField(y_centered,               copies);
02526     deepCopyField(covariance,               copies);
02527     deepCopyField(log_likelihood_dens,      copies);
02528     deepCopyField(need_recompute,           copies);
02529     deepCopyField(original_to_reordered,    copies);
02530 
02531     deepCopyField(diags,                    copies);
02532     deepCopyField(eigenvalues,              copies);
02533     deepCopyField(eigenvectors,             copies);
02534 
02535     deepCopyField(alpha,                    copies);
02536     deepCopyField(center,                   copies);
02537     deepCopyField(sigma,                    copies);
02538 
02539     // TODO Update!
02540 }
02541 
02543 // outputsize //
02545 int GaussMix::outputsize() const {
02546     int os = inherited::outputsize();
02547     for (size_t i = 0; i < outputs_def.length(); i++)
02548         if (outputs_def[i] == 'p')
02549             // We add L-1 because in inherited::outputsize() this was already
02550             // counted as 1.
02551             os += L - 1;
02552     return os;
02553 }
02554 
02556 // precomputeAllGaussianLogCoefficients //
02558 void GaussMix::precomputeAllGaussianLogCoefficients()
02559 {
02560     if (type_id == TYPE_SPHERICAL || type_id == TYPE_DIAGONAL) {
02561         // Nothing to do.
02562     } else {
02563         PLASSERT( type_id == TYPE_GENERAL );
02564         // Precompute the log_coeff.
02565         for (int j = 0; j < L; j++)
02566             log_coeff[j] = precomputeGaussianLogCoefficient(eigenvalues(j), D);
02567     }
02568 }
02569 
02571 // precomputeGaussianLogCoefficient //
02573 real GaussMix::precomputeGaussianLogCoefficient(const Vec& eigenvals,
02574                                                 int dimension) const
02575 {
02576 #ifdef BOUNDCHECK
02577     real last_eigenval = numeric_limits<double>::infinity();
02578 #endif
02579     int n_eig = eigenvals.length();
02580     PLASSERT( dimension >= n_eig );
02581     real log_det = 0;
02582     real var_min = square(sigma_min);
02583     for (int k = 0; k < n_eig; k++) {
02584 #ifdef BOUNDCHECK
02585         if (var_min < epsilon && eigenvals[k] < epsilon)
02586             PLWARNING("In GaussMix::precomputeGaussianLogCoefficient - An "
02587                       "eigenvalue is near zero");
02588         if (eigenvals[k] > last_eigenval)
02589             PLERROR("In GaussMix::precomputeGaussianLogCoefficient - The "
02590                     "eigenvalues must be sorted in decreasing order");
02591         last_eigenval = eigenvals[k];
02592 #endif
02593         log_det += pl_log(max(var_min, eigenvals[k]));
02594     }
02595     if (dimension > n_eig)
02596         // Only the first 'n_eig' eigenvalues are given: we assume
02597         // the other eigenvalues are equal to the last given one.
02598         log_det += pl_log(max(var_min, eigenvals.lastElement()))
02599                  * (dimension - n_eig);
02600     return -0.5 * (dimension * Log2Pi + log_det);
02601 }
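      // The value returned above is the log of the Gaussian normalization constant,
      //     log( (2*pi)^(-d/2) |Sigma|^(-1/2) ) = -0.5 * (d * Log2Pi + log|Sigma|),
      // where log|Sigma| is built from the retained eigenvalues (floored at
      // sigma_min^2) and the (d - n_eig) remaining eigenvalues are all assumed
      // equal to the last retained one. For example, with d = 3 and eigenvalues
      // {4, 1} (n_eig = 2, var_min <= 1), log|Sigma| = log(4) + log(1) + log(1),
      // and the function returns -0.5 * (3 * Log2Pi + log(4)).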
02602 
02604 // replaceGaussian //
02606 void GaussMix::replaceGaussian(int j) {
02607     // This is supposed to be called only during training, when there is no
02608     // predictor part (we use the full joint distribution).
02609     PLASSERT( n_predictor == 0 );
02610     // Find the Gaussian with highest weight.
02611     int high = argmax(alpha);
02612     PLASSERT( high != j );
02613     // Generate the new center from this Gaussian.
02614     Vec new_center = center(j);
02615     generateFromGaussian(new_center, high);
02616     // Copy the covariance.
02617     if (type_id == TYPE_SPHERICAL) {
02618         sigma[j] = sigma[high];
02619     } else if (type_id == TYPE_DIAGONAL) {
02620         diags(j) << diags(high);
02621     } else {
02622         PLASSERT( type_id == TYPE_GENERAL );
02623         eigenvalues(j) << eigenvalues(high);
02624         eigenvectors[j] << eigenvectors[high];
02625         log_coeff[j] = log_coeff[high];
02626         stage_joint_cov_computed[j] = -1;
02627     }
02628     // The new Gaussian arbitrarily takes half of the weight of the heaviest one.
02629     alpha[high] /= 2.0;
02630     alpha[j] = alpha[high];
02631 }
02632 
02634 // resizeDataBeforeUsing //
02636 void GaussMix::resizeDataBeforeUsing()
02637 {
02638     eigenvectors_x.resize(0);
02639     eigenvectors_y_x.resize(0);
02640     joint_cov.resize(0);
02641     joint_inv_cov.resize(0);
02642     chol_joint_cov.resize(0);
02643     log_coeff.resize(0);
02644     log_coeff_x.resize(0);
02645     log_coeff_y_x.resize(0);
02646     stage_joint_cov_computed.resize(0);
02647     y_x_mat.resize(0);
02648 
02649     // chol_cov_template.resize(0, 0);
02650     center_y_x.resize(0, 0);
02651     eigenvalues_x.resize(0, 0);
02652     eigenvalues_y_x.resize(0, 0);
02653 
02654     // Type-specific data.
02655     switch(type_id)
02656     {
02657     case TYPE_SPHERICAL:
02658     case TYPE_DIAGONAL:
02659         break;
02660     case TYPE_GENERAL:
02661         eigenvectors_x.resize(L);
02662         eigenvectors_y_x.resize(L);
02663         joint_cov.resize(L);
02664         joint_inv_cov.resize(L);
02665         chol_joint_cov.resize(L);
02666         log_coeff_x.resize(L);
02667         log_coeff_y_x.resize(L);
02668         stage_joint_cov_computed.resize(L);
02669         stage_joint_cov_computed.fill(-1);
02670         y_x_mat.resize(L);
02671 
02672         // if (efficient_missing)
02673             // chol_cov_template.resize(efficient_k_median, L);
02674         if (n_predictor >= 0)
02675             eigenvalues_x.resize(L, n_predictor);
02676         if (n_predicted >= 0) 
02677         {
02678             center_y_x.resize(L, n_predicted);
02679             eigenvalues_y_x.resize(L, n_predicted);
02680         }
02681         log_coeff.resize(L);
02682         break;
02683 
02684     default:
02685         PLERROR("Invalid type_id");
02686     }
02687 }
02688 
02690 // resizeDataBeforeTraining //
02692 void GaussMix::resizeDataBeforeTraining() {
02693     PLASSERT( train_set );
02694 
02695     n_eigen_computed = -1;
02696 
02697     nsamples = train_set->length();
02698     D = train_set->inputsize();
02699 
02700     if (f_eigen > 0){
02701         if (is_equal(f_eigen, 1))
02702             n_eigen = -1;
02703         else {
02704             n_eigen = int(round(f_eigen * D));
02705             if (n_eigen == 0)
02706                 // We always want to keep at least one eigenvector.
02707                 n_eigen = 1;
02708         }
02709     }
02710 
02711     alpha.resize(L);
02712     clust_imputed_missing.resize(0);
02713     eigenvectors.resize(0);
02714     H3_inverse.resize(0);
02715     imputed_missing.resize(0);
02716     mean_training.resize(0);
02717     no_missing_change.resize(0);
02718     sigma.resize(0);
02719     stddev_training.resize(0);
02720 
02721     center.resize(L, D);
02722     covariance.resize(0, 0);
02723     diags.resize(0, 0);
02724     eigenvalues.resize(0, 0);
02725     error_covariance.resize(0);
02726     initial_weights.resize(nsamples);
02727     //posteriors.resize(nsamples, L);
02728     //updated_weights.resize(L, nsamples);
02729     stage_replaced.resize(L);
02730     stage_replaced.fill(-1);
02731 
02732     // Type-specific data.
02733     switch(type_id)
02734     {
02735     case TYPE_SPHERICAL:
02736         sigma.resize(L);
02737         break;
02738     case TYPE_DIAGONAL:
02739         diags.resize(L, D);
02740         break;
02741     case TYPE_GENERAL:
02742         eigenvectors.resize(L);
02743 
02744         if (n_eigen == -1 || n_eigen == D)
02745             // We need to compute all eigenvectors.
02746             n_eigen_computed = D;
02747         else 
02748         {
02749             if (n_eigen > D || n_eigen < 1)
02750                 PLERROR("In GaussMix::resizeDataBeforeTraining - Invalid value"
02751                         " for 'n_eigen' (%d), should be between 1 and %d",
02752                         n_eigen, D);
02753             n_eigen_computed = n_eigen + 1;
02754         }
02755         eigenvalues.resize(L, n_eigen_computed);
02756         for (int i = 0; i < eigenvectors.length(); i++)
02757             eigenvectors[i].resize(n_eigen_computed, D);
02758         if (impute_missing) 
02759         {
02760             H3_inverse.resize(L);
02761             error_covariance.resize(L);
02762             imputed_missing.resize(L);
02763             for (int j = 0; j < L; j++) 
02764             {
02765                 error_covariance[j].resize(D, D);
02766                 imputed_missing[j] = new MemoryVMatrix(nsamples, D);
02767             }
02768             /*
02769             PPath fname = "/u/delallea/tmp/imputed_missing.pmat";
02770             imputed_missing = new FileVMatrix(fname, nsamples, D);
02771             */
02772             // TODO May be useful to handle other types of VMats for large
02773             // datasets.
02774             // TODO Move outside of this method.
02775             if (efficient_missing == 1 || efficient_missing == 3)
02776                 clust_imputed_missing.resize(L);
02777         }
02778         if (efficient_missing == 1 || efficient_missing == 3)
02779             no_missing_change.resize(nsamples);
02780         break;
02781 
02782     default:
02783         PLERROR("Invalid type_id");
02784     }
02785 }
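      // Note: when 0 < n_eigen < D, 'n_eigen_computed' is set to n_eigen + 1: one
      // extra eigenvalue is kept because the remaining (D - n_eigen_computed)
      // eigenvalues are all approximated by the last computed one (see
      // precomputeGaussianLogCoefficient() and the covariance reconstructions in
      // setPredictor() / setPredictorPredictedSizes_const()).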
02786 
02788 // setPredictor //
02790 void GaussMix::setPredictor(const Vec& predictor, bool call_parent) const {
02791     static Vec log_p_x_j_alphaj;
02792     static Vec x_minus_mu_x; // Used to store 'x - mu_x'.
02793     static TVec<int> missing, non_missing;
02794     static Mat work_mat1, work_mat2;
02795     static Mat full_cov;
02796     static Mat cov_x_j;
02797     static Mat inv_cov_x;
02798     static Mat cov_y_x;
02799     static Mat cross_cov;
02800     static TVec<Mat> eigenvectors_x_miss;
02801     static Mat eigenvalues_x_miss;
02802     static TVec<Mat> y_x_mat_miss;
02803 
02804     if (call_parent)
02805         inherited::setPredictor(predictor);
02806 
02807     if (n_predictor == 0) {
02808         // There is no predictor part anyway: nothing to do.
02809         PLASSERT( predictor_part.isEmpty() );
02810         return;
02811     }
02812 
02813     if (stage == 0)
02814         // The Gaussian mixture is not ready yet (it has not yet been
02815         // trained): there is nothing more we can do.
02816         // Note that this is also why one needs to set a stage > 0 if the
02817         // Gaussian mixture parameters are set by hand (and not learnt).
02818         return;
02819 
02820     // We need to compute:
02821     // p(j | x) = p(x | j) p(j) / p(x)
02822     //          = p(x | j) p(j) / sum_k p(x | k) p(k)
02823 
02824     if (type_id == TYPE_GENERAL) {
02825         // We need to compute E[Y|x,j].
02826         if (!predictor_part.hasMissing()) {
02827             // Simple case: the predictor part has no missing value, and we can
02828             // re-use the quantities computed in setPredictorPredictedSizes(..).
02829 
02830             // If the previous predictor part set had missing values, we will
02831             // need to recompute some important variables (e.g. eigenvectors /
02832             // values of y|x). This can be done by re-setting the sizes.
02833             // TODO This is a bit hackish... we may want to actually store the
02834             // appropriate data elsewhere so that there is no need to recompute
02835             // it again.
02836             if (previous_predictor_part_had_missing)
02837                 setPredictorPredictedSizes_const();
02838 
02839             previous_predictor_part_had_missing = false;
02840             x_minus_mu_x.resize(n_predictor);
02841             Vec mu_target;
02842             for (int j = 0; j < L; j++) {
02843                 x_minus_mu_x << predictor_part;
02844                 x_minus_mu_x -= center(j).subVec(0, n_predictor);
02845                 mu_target = center_y_x(j);
02846                 if (n_predictor > 0)
02847                     product(mu_target, y_x_mat[j], x_minus_mu_x);
02848                 else
02849                     mu_target.fill(0);
02850                 mu_target += center(j).subVec(n_predictor, n_predicted);
02851             }
02852         } else {
02853             previous_predictor_part_had_missing = true;
02854             // TODO Code duplication is ugly!
02855             non_missing.resize(0);
02856             missing.resize(0);
02857             for (int k = 0; k < predictor_part.length(); k++)
02858                 if (!is_missing(predictor_part[k]))
02859                     non_missing.append(k);
02860                 else
02861                     missing.append(k);
02862             int n_non_missing = non_missing.length();
02863             int n_missing = missing.length();
02864             int n_predicted_ext = n_predicted + n_missing;
02865             PLASSERT( n_missing + n_non_missing == n_predictor );
02866 
02867             work_mat1.resize(n_predicted_ext, n_non_missing);
02868             work_mat2.resize(n_predicted_ext, n_predicted_ext);
02869             Vec eigenvals;
02870             real var_min = square(sigma_min);
02871             eigenvalues_x_miss.resize(L, n_non_missing);
02872             eigenvectors_x_miss.resize(L);
02873             for (int j = 0; j < L; j++) {
02874                 // First we compute the joint covariance matrix from the
02875                 // eigenvectors and eigenvalues:
02876                 // full_cov = sum_k (lambda_k - lambda0) v_k v_k' + lambda0.I
02877                 // TODO Do we really need to compute the full matrix?
02878 
02879                 PLASSERT( n_predictor + n_predicted == D );
02880 
02881                 Mat& full_cov_j = full_cov;
02882                 full_cov_j.resize(D, D);
02883                 eigenvals = eigenvalues(j);
02884                 real lambda0 = max(var_min, eigenvals[n_eigen_computed - 1]);
02885 
02886                 full_cov_j.fill(0);
02887                 Mat& eigenvectors_j = eigenvectors[j];
02888                 PLASSERT( eigenvectors_j.width() == D );
02889 
02890                 for (int k = 0; k < n_eigen_computed - 1; k++)
02891                     externalProductScaleAcc(
02892                             full_cov_j, eigenvectors_j(k),
02893                             eigenvectors_j(k),
02894                             max(var_min, eigenvals[k]) - lambda0);
02895 
02896                 for (int i = 0; i < D; i++)
02897                     full_cov_j(i,i) += lambda0;
02898 
02899                 // By construction, the resulting matrix is symmetric. However,
02900                 // it may happen that it is not exactly the case due to numerical
02901                 // approximations. Thus we ensure it is perfectly symmetric.
02902                 PLASSERT( full_cov_j.isSymmetric(false) );
02903                 fillItSymmetric(full_cov_j);
02904 
02905                 // Extract the covariance of the predictor x.
02906                 Mat cov_x_j_miss = full_cov.subMat(0, 0, n_predictor, n_predictor);
02907                 cov_x_j.setMod(n_non_missing);
02908                 cov_x_j.resize(n_non_missing, n_non_missing);
02909                 for (int k = 0; k < n_non_missing; k++)
02910                     for (int p = k; p < n_non_missing; p++)
02911                         cov_x_j(k,p) = cov_x_j(p,k) =
02912                             cov_x_j_miss(non_missing[k], non_missing[p]);
02913 
02914                 // Compute its inverse.
02915                 /*
02916                 inv_cov_x.resize(n_non_missing, n_non_missing);
02917                 matInvert(cov_x_j, inv_cov_x);
02918                 //PLASSERT( inv_cov_x.isSymmetric(false) );
02919                 fillItSymmetric(inv_cov_x);
02920                 */
02921 
02922 #if 1
02923                 // Compute its SVD.
02924                 eigenvectors_x_miss[j].resize(n_non_missing, n_non_missing);
02925                 eigenvals = eigenvalues_x_miss(j);
02926                 eigenVecOfSymmMat(cov_x_j, n_non_missing, eigenvals,
02927                                   eigenvectors_x_miss[j]);
02928 
02929                 // And its inverse (we'll need it for the covariance of y|x).
02930                 inv_cov_x.resize(n_non_missing, n_non_missing);
02931                 inv_cov_x.fill(0);
02932                 if (n_non_missing > 0) {
02933                     // I am not sure about this assert, but since we extract the
02934                     // covariance of x from a matrix whose eigenvalues are all more
02935                     // than 'var_min', it looks like the eigenvalues of the
02936                     // covariance of x should also be more than 'var_min'. If I am
02937                     // wrong, remove the assert and see if it is needed to
02938                     // potentially set lambda0 to var_min.
02939                     PLASSERT( eigenvals.lastElement() > var_min ||
02940                             eigenvals.lastElement() / var_min > 0.99 );
02941                     lambda0 = eigenvals.lastElement();
02942                     real one_over_lambda0 = 1 / lambda0;
02943                     Mat& eigenvectors_x_j = eigenvectors_x_miss[j];
02944                     for (int k = 0; k < n_non_missing - 1; k++)
02945                         externalProductScaleAcc(
02946                                 inv_cov_x, eigenvectors_x_j(k), eigenvectors_x_j(k),
02947                                 1 / max(var_min, eigenvals[k]) - one_over_lambda0);
02948                     for (int i = 0; i < n_non_missing; i++)
02949                         inv_cov_x(i,i) += one_over_lambda0;
02950                 }
02951 #endif
02952 
02953                 // Compute the covariance of y|x.
02954                 // It is only needed when there is a predictor part, since
02955                 // otherwise we can simply use the full covariance.
02956                 // TODO See if we can use simpler formulas.
02957                 Mat& cov_y_x_j = cov_y_x; // TODO Can we get rid of cov_y_x_j?
02958                 cov_y_x_j.resize(n_predicted_ext, n_predicted_ext);
02959                 cov_y_x_j.subMat(0, 0, n_predicted, n_predicted) <<
02960                     full_cov_j.subMat(n_predictor, n_predictor, n_predicted, n_predicted);
02961                 for (int k = 0; k < n_missing; k++) {
02962                     int x_missing = missing[k];
02963                     for (int p = 0; p < n_predicted_ext; p++) {
02964                         if (p < n_predicted)
02965                             cov_y_x_j(n_predicted + k, p) =
02966                                 cov_y_x_j(p, n_predicted + k) =
02967                                 full_cov_j(x_missing, p + n_predictor);
02968                         else
02969                             cov_y_x_j(n_predicted + k, p) =
02970                                 cov_y_x_j(p, n_predicted + k) =
02971                                 full_cov_j(x_missing, missing[p - n_predicted]);
02972                     }
02973                 }
02974 
02975                 y_x_mat_miss.resize(L);
02976                 y_x_mat_miss[j].resize(n_predicted, n_non_missing);
02977                 if (n_non_missing > 0) {
02978                     cross_cov.resize(n_predicted_ext, n_non_missing);
02979                     for (int k = 0; k < n_non_missing; k++) {
02980                         for (int p = 0; p < n_predicted_ext; p++) {
02981                             if (p < n_predicted)
02982                                 cross_cov(p, k) =
02983                                     full_cov_j(non_missing[k],p + n_predictor);
02984                             else
02985                                 cross_cov(p, k) =
02986                                     full_cov_j(non_missing[k],
02987                                                missing[p - n_predicted]);
02988                         }
02989                     }
02990                                                             
02991                     /*
02992                        // Old (and BUGGED) code!
02993                     cross_cov =
02994                         full_cov_j.subMat(n_non_missing, 0,
02995                                           n_predicted_ext, n_non_missing);
02996                                           */
02997                     product(work_mat1, cross_cov, inv_cov_x);
02998                     productTranspose(work_mat2, work_mat1, cross_cov);
02999                     cov_y_x_j -= work_mat2;
03000                     y_x_mat_miss[j] << work_mat1.subMat(0, 0,
03001                                                    n_predicted, n_non_missing);
03002                 }
03003                 // Compute SVD of the covariance of y|x.
03004                 eigenvectors_y_x[j].resize(n_predicted, n_predicted);
03005                 eigenvals = eigenvalues_y_x(j);
03006                 // Extract the covariance of the predicted part we are really
03007                 // interested in.
03008                 cov_y_x = cov_y_x_j.subMat(0, 0, n_predicted, n_predicted);
03009                 // Ensure covariance matrix is perfectly symmetric.
03010                 PLASSERT( cov_y_x.isSymmetric(false, true) );
03011                 fillItSymmetric(cov_y_x);
03012                 eigenVecOfSymmMat(cov_y_x, n_predicted,
03013                                   eigenvals, eigenvectors_y_x[j]);
03014                 log_coeff_y_x[j] =
03015                     precomputeGaussianLogCoefficient(eigenvals, n_predicted);
03016             }
03017 
03018             x_minus_mu_x.resize(n_non_missing);
03019             Vec mu_target;
03020             for (int j = 0; j < L; j++) {
03021                 for (int k = 0; k < n_non_missing; k++)
03022                     x_minus_mu_x[k] =
03023                         predictor_part[non_missing[k]] - center(j, non_missing[k]);
03024                 mu_target = center_y_x(j);
03025                 if (n_non_missing > 0)
03026                     product(mu_target, y_x_mat_miss[j], x_minus_mu_x);
03027                 else
03028                     mu_target.fill(0);
03029                 mu_target += center(j).subVec(n_predictor, n_predicted);
03030             }
03031 
03032         }
03033     }
03034 
03035     log_p_x_j_alphaj.resize(L);
03036     for (int j = 0; j < L; j++)
03037         log_p_x_j_alphaj[j] = computeLogLikelihood(predictor_part, j, true)
03038                             + pl_log(alpha[j]);
03039 
03040     real log_p_x = logadd(log_p_x_j_alphaj);
03041 
03042     log_p_j_x.resize(L);
03043     p_j_x.resize(L);
03044     for (int j = 0; j < L; j++) {
03045         real t = log_p_x_j_alphaj[j] - log_p_x;
03046         log_p_j_x[j] = t;
03047         p_j_x[j] = exp(t);
03048     }
03049 
03050 }
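      // Note on the missing-predictor branch above: conditioning is performed only
      // on the observed predictor coordinates ('non_missing'). The missing
      // predictor coordinates are temporarily appended to the predicted block
      // (hence n_predicted_ext = n_predicted + n_missing) while the conditional
      // covariance is formed, and are then dropped by keeping only the top-left
      // n_predicted x n_predicted sub-matrix before its eigendecomposition.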
03051 
03053 // getInitialWeightsFrom //
03055 void GaussMix::getInitialWeightsFrom(const VMat& vmat)
03056 {
03057     PLASSERT( vmat->weightsize() == 1 );
03058     Vec tmp1, tmp2;
03059     real w;
03060     PLASSERT( vmat );
03061     PP<ProgressBar> pb;
03062     if (report_progress)
03063         pb = new ProgressBar("Getting sample weights from data set",
03064                              vmat->length());
03065     for (int i = 0; i < vmat->length(); i++) {
03066         vmat->getExample(i, tmp1, tmp2, w);
03067         initial_weights[i] = w;
03068         if (report_progress)
03069             pb->update(i + 1);
03070     }
03071 }
03072 
03074 // getTrainCostNames //
03076 TVec<string> GaussMix::getTrainCostNames() const
03077 {
03078     static TVec<string> costs;
03079     if (costs.isEmpty()) {
03080         costs.append("init_time");
03081         costs.append("training_time");
03082     }
03083     return costs;
03084 }
03085 
03087 // setPredictorPredictedSizes //
03089 bool GaussMix::setPredictorPredictedSizes(int n_i, int n_t,
03090                                    bool call_parent)
03091 {
03092     bool sizes_changed = false;
03093     if (call_parent)
03094         sizes_changed =
03095             inherited::setPredictorPredictedSizes(n_i, n_t, call_parent);
03096     setPredictorPredictedSizes_const();
03097     return sizes_changed;
03098 }
03099 
03101 // setPredictorPredictedSizes_const //
03103 void GaussMix::setPredictorPredictedSizes_const() const
03104 {
03105     static Mat inv_cov_x;
03106     static Mat full_cov;
03107     static Mat cov_y_x;
03108     static Mat work_mat1, work_mat2;
03109     static Mat cross_cov;
03110 
03111     if (n_predictor == -1 || n_predicted == -1 || D == -1)
03112         // Sizes are not defined yet, there is nothing we can do.
03113         return;
03114 
03115     if (type_id == TYPE_SPHERICAL || type_id == TYPE_DIAGONAL ) {
03116         // Nothing to do.
03117     } else {
03118         PLASSERT( type_id == TYPE_GENERAL );
03119 
03120         work_mat1.resize(n_predicted, n_predictor);
03121         work_mat2.resize(n_predicted, n_predicted);
03122         Vec eigenvals;
03123         real var_min = square(sigma_min);
03124         // Resize some data accordingly.
03125         if (n_predictor >= 0)
03126             eigenvalues_x.resize(L, n_predictor);
03127         if (n_predicted >= 0) 
03128         {
03129             center_y_x.resize(L, n_predicted);
03130             eigenvalues_y_x.resize(L, n_predicted);
03131         }
03132         for (int j = 0; j < L; j++) {
03133             // Compute the covariance of x and y|x for the j-th Gaussian (we
03134             // will need them to compute the likelihood).
03135 
03136             // First we compute the joint covariance matrix from the
03137             // eigenvectors and eigenvalues:
03138             // full_cov = sum_k (lambda_k - lambda0) v_k v_k' + lambda0.I
03139 
03140             PLASSERT( n_predictor + n_predicted == D );
03141             Mat& full_cov_j = full_cov;
03142             full_cov_j.resize(D, D);
03143             eigenvals = eigenvalues(j);
03144             real lambda0 = max(var_min, eigenvals[n_eigen_computed - 1]);
03145 
03146             full_cov_j.fill(0);
03147             Mat& eigenvectors_j = eigenvectors[j];
03148             PLASSERT( eigenvectors_j.width() == D );
03149 
03150             for (int k = 0; k < n_eigen_computed - 1; k++)
03151                 externalProductScaleAcc(full_cov_j, eigenvectors_j(k),
03152                                         eigenvectors_j(k),
03153                                         max(var_min, eigenvals[k]) - lambda0);
03154             for (int i = 0; i < D; i++)
03155                 full_cov_j(i,i) += lambda0;
03156 
03157             // By construction, the resulting matrix is symmetric. However,
03158             // it may happen that it is not exactly the case due to numerical
03159             // approximations. Thus we ensure it is perfectly symmetric.
03160             PLASSERT( full_cov_j.isSymmetric(false) );
03161             fillItSymmetric(full_cov_j);
03162 
03163             // Extract the covariance of the predictor x.
03164             Mat cov_x_j = full_cov_j.subMat(0, 0, n_predictor, n_predictor);
03165 
03166             // Compute its SVD.
03167             eigenvectors_x[j].resize(n_predictor, n_predictor);
03168             eigenvals = eigenvalues_x(j);
03169             eigenVecOfSymmMat(cov_x_j, n_predictor, eigenvals, eigenvectors_x[j]);
03170             // Note that the computation above will have destroyed 'cov_x_j',
03171             // i.e. a part of the full covariance matrix.
03172             log_coeff_x[j] =
03173                 precomputeGaussianLogCoefficient(eigenvals, n_predictor);
03174 
03175 
03176             // And its inverse (we'll need it for the covariance of y|x).
03177             inv_cov_x.resize(n_predictor, n_predictor);
03178             inv_cov_x.fill(0);
03179             if (n_predictor > 0) {
03180                 // I am not sure about this assert, but since we extract the
03181                 // covariance of x from a matrix whose eigenvalues are all more
03182                 // than 'var_min', it looks like the eigenvalues of the
03183                 // covariance of x should also be more than 'var_min'. If I am
03184                 // wrong, remove the assert and see if it is needed to
03185                 // potentially set lambda0 to var_min.
03186                 PLASSERT( eigenvals[n_predictor - 1] > var_min ||
03187                         eigenvals[n_predictor - 1] / var_min > 0.99 );
03188                 lambda0 = eigenvals[n_predictor - 1];
03189                 real one_over_lambda0 = 1 / lambda0;
03190                 Mat& eigenvectors_x_j = eigenvectors_x[j];
03191                 for (int k = 0; k < n_predictor - 1; k++)
03192                     externalProductScaleAcc(
03193                         inv_cov_x, eigenvectors_x_j(k), eigenvectors_x_j(k),
03194                         1 / max(var_min, eigenvals[k]) - one_over_lambda0);
03195                 for (int i = 0; i < n_predictor; i++)
03196                     inv_cov_x(i,i) += one_over_lambda0;
03197             }
03198 
03199             // Compute the covariance of y|x.
03200             // It is only needed when there is a predictor part, since
03201             // otherwise we can simply use the full covariance.
03202             // TODO See if we can use simpler formulas.
03203             Mat& cov_y_x_j = cov_y_x; // TODO Can we get rid of cov_y_x_j?
03204             cov_y_x_j.resize(n_predicted, n_predicted);
03205             cov_y_x_j <<
03206                 full_cov_j.subMat(n_predictor, n_predictor, n_predicted, n_predicted);
03207             y_x_mat[j].resize(n_predicted, n_predictor);
03208             if (n_predictor > 0) {
03209                 cross_cov = full_cov_j.subMat(n_predictor, 0,
03210                                               n_predicted, n_predictor);
03211                 product(work_mat1, cross_cov, inv_cov_x);
03212                 productTranspose(work_mat2, work_mat1, cross_cov);
03213                 cov_y_x_j -= work_mat2;
03214                 y_x_mat[j] << work_mat1;
03215             }
03216             // Compute SVD of the covariance of y|x.
03217             // TODO Note that if n_predictor == 0 (e.g. when using the Manifold
03218             // Parzen algorithm), the covariance of y|x is also the full
03219             // covariance, and thus we should instead re-use directly the
03220             // (possibly few) eigenvectors of the full covariance matrix
03221             // instead of wasting time and memory in the computations below.
03222             eigenvectors_y_x[j].resize(n_predicted, n_predicted);
03223             eigenvals = eigenvalues_y_x(j);
03224             // Ensure covariance matrix is perfectly symmetric.
03225             PLASSERT( cov_y_x_j.isSymmetric(false, true) );
03226             fillItSymmetric(cov_y_x_j);
03227             eigenVecOfSymmMat(cov_y_x_j, n_predicted, eigenvals, eigenvectors_y_x[j]);
03228             log_coeff_y_x[j] =
03229                 precomputeGaussianLogCoefficient(eigenvals, n_predicted);
03230         }
03231     }
03232 }
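      // The loop above implements standard Gaussian conditioning: writing the joint
      // covariance of (x, y) in blocks as [[C_xx, C_xy], [C_yx, C_yy]],
      //     y_x_mat[j] = C_yx * C_xx^{-1},
      //     cov(y|x)   = C_yy - C_yx * C_xx^{-1} * C_xy   (Schur complement),
      // and setPredictor() then uses E[y | x, j] = mu_y + y_x_mat[j] * (x - mu_x).
      // C_xx^{-1} is itself rebuilt from the eigendecomposition of C_xx, with
      // eigenvalues floored at sigma_min^2.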
03233 
03235 // setTrainingSet //
03237 void GaussMix::setTrainingSet(VMat training_set, bool call_forget)
03238 {
03239     if (efficient_missing != 2) {
03240         inherited::setTrainingSet(training_set, call_forget);
03241         return;
03242     }
03243 
03244     PP<ReorderByMissingVMatrix> reordered_training_set =
03245         new ReorderByMissingVMatrix();
03246     reordered_training_set->source = training_set;
03247     reordered_training_set->build();
03248     inherited::setTrainingSet((ReorderByMissingVMatrix*)reordered_training_set,
03249                               call_forget);
03250     // Now fill in the vector that indicates when the matrices need to be
03251     // recomputed.
03252     need_recompute.resize(training_set->length());
03253     need_recompute << reordered_training_set->missing_pattern_change;
03254 
03255     original_to_reordered.resize(training_set->length());
03256     for (int i = 0; i < training_set->length(); i++)
03257         original_to_reordered[reordered_training_set->indices[i]] = i;
03258 }
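      // With efficient_missing == 2, the training set is wrapped in a
      // ReorderByMissingVMatrix so that consecutive rows share their missing-value
      // pattern whenever possible: 'need_recompute[i]' is true when row i's pattern
      // differs from that of row i-1 (cf. 'missing_pattern_change'), and
      // 'original_to_reordered' maps an index in the original set to the
      // corresponding row in the reordered one.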
03259 
03260 // Boost graph property for edges in a binary tree.
03261 struct MissingFlag {
03262     // Indicates whether a bit is flagged as missing.
03263     bool is_missing;
03264 };
03265 
03266 // Boost graph property for nodes in a binary tree.
03267 // It is only used in leaves, to store the pattern's index.
03268 struct NoProperty {
03269     int index;
03270 };
03271 
03273 // create_list //
03275 void create_list(const TVec<int>& parent_, const TVec< TVec<int> >& children_,
03276                  TVec<int>& nodes_, TVec<bool>& use_previous_,
03277                  TVec<bool>& can_free_, int current_, bool cur_use_prev,
03278                  bool cur_can_free)
03279 {
03280     // Create list of nodes in the tree.
03281     nodes_.append(current_);
03282     use_previous_.append(cur_use_prev);
03283     can_free_.append(cur_can_free);
03284     for (int i = 0; i < children_[current_].length(); i++) {
03285         cur_use_prev = (i == 0);
03286         cur_can_free = (i == children_[current_].length() - 1);
03287         create_list(parent_, children_, nodes_, use_previous_, can_free_,
03288                     children_[current_][i], cur_use_prev, cur_can_free);
03289     }
03290 }
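      // create_list() performs a pre-order traversal of the spanning tree rooted at
      // 'current_': nodes are appended in visiting order, 'use_previous_' is true
      // for the first child of a node (which can presumably reuse the factorization
      // cached by its parent) and 'can_free_' is true for the last child (after
      // which the parent's cached data is no longer needed). Note that 'parent_' is
      // not actually used by the recursion.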
03291 
03293 // train //
03295 void GaussMix::train()
03296 {
03297     ptimer->startTimer("training_time");
03298     // Standard PLearner checks.
03299     if (!initTrain())
03300         return;
03301 
03302     // When training, we want to learn the full joint distribution.
03303     int backup_predicted_size = predicted_size;
03304     int backup_predictor_size = predictor_size;
03305     bool need_restore_sizes = setPredictorPredictedSizes(0, -1);
03306 
03307     // Initialization before training.
03308     if (stage == 0) {
03309         ptimer->startTimer("init_time");
03310 
03311         // Precompute nodes of the missing graph.
03312         typedef boost::adjacency_list<boost::listS, boost::vecS,
03313                 boost::directedS, NoProperty, MissingFlag> BinaryBitsTree;
03314         typedef boost::graph_traits<BinaryBitsTree>::vertex_iterator vertex_iter;
03315         typedef boost::graph_traits<BinaryBitsTree>::vertex_descriptor vertex_descr;
03316         typedef boost::graph_traits<BinaryBitsTree>::out_edge_iterator oedge_iter;
03317         typedef boost::graph_traits<BinaryBitsTree>::edge_descriptor edge_descr;
03318         typedef std::pair<oedge_iter, oedge_iter> oedge_iter_pair;
03319 
03320         BinaryBitsTree tree(1);
03321         const vertex_descr& root_vertex = *(boost::vertices(tree).first);
03322         PP<ProgressBar> pb;
03323         if ((efficient_missing == 1 || efficient_missing == 3)
03324                 && report_progress)
03325             pb = new ProgressBar("Finding unique missing patterns",
03326                                  train_set->length());
03327         Vec input, target;
03328         real weight;
03329         int n_unique = 0;
03330         missing_patterns.resize(0, train_set->inputsize());
03331         TVec<bool> pattern(train_set->inputsize());
03332         sample_to_template.resize(train_set->length());
03333         TVec< TVec<int> > pattern_to_samples;
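              // The loop below routes every training sample down a binary tree keyed
              // on its missingness bits, creating one leaf per distinct missing
              // pattern. At the end, 'missing_patterns' holds the unique patterns,
              // 'sample_to_template[i]' is the pattern index of sample i, and
              // 'pattern_to_samples[p]' lists the samples sharing pattern p.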
03334         for (int i = 0; (efficient_missing == 1 || efficient_missing == 3)
03335                 && i < train_set->length(); i++) {
03336             train_set->getExample(i, input, target, weight);
03337             vertex_descr current_vertex = root_vertex;
03338             for (int k = 0; k < input.length(); k++) {
03339                 bool bit = is_missing(input[k]);
03340                 pattern[k] = bit;
03341 
03342                 const oedge_iter_pair& oeiter_pair =
03343                     boost::out_edges(current_vertex, tree);
03344                 oedge_iter oeiter = oeiter_pair.first;
03345                 while (oeiter != oeiter_pair.second &&
03346                         tree[*oeiter].is_missing != bit) {
03347                     oeiter++;
03348                 }
03349                 if (oeiter == oeiter_pair.second) {
03350                     // Could not find this bit: need to create new vertex and
03351                     // edge.
03352                     const vertex_descr& new_vertex = boost::add_vertex(tree);
03353                     const edge_descr& new_edge =
03354                         boost::add_edge(current_vertex, new_vertex,tree).first;
03355                     tree[new_edge].is_missing = bit;
03356                     current_vertex = new_vertex;
03357                     if (k == input.length() - 1) {
03358                         // This is a leaf.
03359                         n_unique++;
03360                         missing_patterns.appendRow(pattern);
03361                         int index = missing_patterns.length() - 1;
03362                         tree[current_vertex].index = index;
03363                         pattern_to_samples.append(TVec<int>());
03364                     }
03365                 } else {
03366                     // We found an existing edge.
03367                     current_vertex = boost::target(*oeiter, tree);
03368                 }
03369                 if (k == input.length() - 1) {
03370                     // Leaf node.
03371                     // First step: each sample is assigned to its missing
03372                     // pattern.
03373                     int pattern_idx = tree[current_vertex].index;
03374                     sample_to_template[i] = pattern_idx;
03375                     pattern_to_samples[pattern_idx].append(i);
03376                     // pout << sample_to_template[i] << endl;
03377                 }
03378             }
03379             if (report_progress)
03380                 pb->update(i + 1);
03381         }
03382 
03383         //TVec<int> sample_to_pattern = sample_to_template.copy();
03384 
03385         if ((efficient_missing == 1 || efficient_missing == 3)
03386                 && verbosity >= 2)
03387             pout << "Found " << n_unique << " unique missing patterns" << endl;
03388 
03389         if (efficient_missing == 1 || efficient_missing == 3) {
03390             // Perform some kind of k-median on missing patterns for initial
03391             // clustering of missing patterns.
03392             TVec<int> indices(0, missing_patterns.length() - 1, 1);
03393             // TODO Use random_gen (but -> different k-means initialization)
03394             PRandom::common(false)->shuffleElements(indices);
03395             int n_clusters = min(efficient_k_median,
03396                                  missing_patterns.length());
03397             missing_template.resize(
03398                     n_clusters, missing_patterns.width());
03399             TVec<int> missing_assign(missing_patterns.length(), -1);
03400             for (int i = 0; i < n_clusters; i++) {
03401                 missing_template(i) << missing_patterns(indices[i]);
03402             }
03403             bool finished = false;
03404             TVec<int> n_diffs(n_clusters);
03405             int count_iter = 0;
03406             if (report_progress)
03407                 pb = new ProgressBar("Performing k-median on " +
03408                         tostring(missing_patterns.length())    +
03409                         " missing patterns", efficient_k_median_iter);
03410             TMat<int> majority(n_clusters, missing_patterns.width());
03411             static TVec<int> n_assigned;
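                  // The loop below is a k-median on the binary missing patterns:
                  // each pattern is assigned to the closest template under the
                  // Hamming distance, templates are updated by a per-bit majority
                  // vote (ties broken at random), clusters to which more than
                  // 'max_samples_in_cluster' patterns are assigned are split in two,
                  // and clusters with fewer than 'min_samples_in_cluster' patterns
                  // are removed (merged into another cluster) or re-seeded at a
                  // random pattern.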
03412             while (!finished && count_iter < efficient_k_median_iter) {
03413                 finished = true;
03414                 // Assign each missing pattern to closest template.
03415                 n_assigned.resize(n_clusters);
03416                 n_assigned.fill(0);
03417                 for (int i = 0; i < missing_patterns.length(); i++) {
03418                     n_diffs.fill(0);
03419                     for (int j = 0; j < n_clusters; j++)
03420                         for (int k = 0; k < missing_patterns.width(); k++)
03421                             if (missing_patterns(i, k) !=
03422                                 missing_template(j, k))
03423                                 n_diffs[j]++;
03424                     int new_assign = argmin(n_diffs);
03425                     if (new_assign != missing_assign[i])
03426                         finished = false;
03427                     missing_assign[i] = new_assign;
03428                     n_assigned[new_assign]++;
03429                 }
03430                 // Recompute missing templates.
03431                 majority.fill(0);
03432                 for (int i = 0; i < missing_patterns.length(); i++) {
03433                     int assign = missing_assign[i];
03434                     for (int k = 0; k < missing_patterns.width(); k++) {
03435                         if (missing_patterns(i, k))
03436                             majority(assign, k)++;
03437                         else
03438                             majority(assign, k)--;
03439                     }
03440                 }
03441                 for (int j = 0; j < n_clusters; j++) {
03442                     bool not_too_many_samples =
03443                         max_samples_in_cluster == -1 ||
03444                         n_assigned[j] <= max_samples_in_cluster;
03445                     bool not_too_few_samples =
03446                         n_assigned[j] >= min_samples_in_cluster ||
03447                         (n_clusters == 1)                       ||
03448                         n_assigned[j] == -1; // Newly created cluster.
03449                     bool is_valid_cluster = not_too_many_samples    &&
03450                                             not_too_few_samples     &&
03451                                             n_assigned[j] != -1;
03452                     if (is_valid_cluster) {
03453                         for (int k = 0; k < missing_template.width(); k++)
03454                             if (majority(j, k) > 0)
03455                                 missing_template(j, k) = true;
03456                             else if (majority(j, k) < 0)
03457                                 missing_template(j, k) = false;
03458                             else
03459                                 // TODO Use random_gen (but be careful about side
03460                                 // effects, e.g. on the k-means initialization).
03461                                 missing_template(j, k) =
03462                                     (PRandom::common(false)->uniform_sample() < 0.5);
03463                     } else if (!not_too_many_samples) {
03464                         // This cluster has too many points assigned to it
03465                         // (more than 'max_samples_in_cluster'). We split it in
03466                         // two, by picking two new centers, randomly chosen in
03467                         // this cluster.
03468                         static TVec<int> cluster_samples;
03469                         cluster_samples.resize(0);
03470                         for (int i = 0; i < missing_assign.length(); i++)
03471                             if (missing_assign[i] == j)
03472                                 cluster_samples.append(i);
03473                         int center_1 =
03474                             PRandom::common(false)->uniform_multinomial_sample(cluster_samples.length());
03475                         missing_template(j) << missing_patterns(center_1);
03476                         bool found_valid_center_2 = false;
03477                         int center_2 = -1;
03478                         while (!found_valid_center_2) {
03479                             center_2 =
03480                                 PRandom::common(false)->uniform_multinomial_sample(cluster_samples.length());
03481                             found_valid_center_2 = false;
03482                             for (int k = 0; k < missing_template.width(); k++)
03483                                 if (missing_template(j, k) !=
03484                                         missing_patterns(center_2, k)) {
03485                                     found_valid_center_2 = true;
03486                                     break;
03487                                 }
03488                         }
03489                         n_clusters++;
03490                         majority.resize(n_clusters, majority.width());
03491                         n_diffs.resize(n_clusters);
03492                         n_assigned.resize(n_clusters);
03493                         n_assigned.last() = -1;
03494                         missing_template.resize(n_clusters,
03495                                                 missing_template.width());
03496                         missing_template(n_clusters - 1) <<
03497                             missing_patterns(center_2);
03498                         finished = false;
03499                         if (verbosity >= 10)
03500                             pout << "Cluster " << j << " split in two (" <<
03501                                 n_assigned[j] << " > " <<
03502                                 max_samples_in_cluster << "), there are now "
03503                                 << n_clusters << " clusters." << endl;
03504                     } else if (!not_too_few_samples) {
03505                         // This cluster has too few patterns assigned to it
03506                         // (fewer than 'min_samples_in_cluster'). If we can merge
03507                         // it with an existing cluster, we do so; otherwise we
03508                         // reset its center to a randomly chosen pattern.
03509                         int candidate = 0;
03510                         while (candidate < n_clusters) {
03511                             if (n_assigned[candidate] > 0 && candidate != j &&
03512                                     (max_samples_in_cluster == -1 ||
03513                                     n_assigned[candidate] + n_assigned[j] <=
03514                                         max_samples_in_cluster)) {
03515                                 // This candidate cluster can absorb the points
03516                                 // of the j-th cluster without violating the
03517                                 // maximum number of samples constraint.
03518                                 break;
03519                             }
03520                             candidate++;
03521                         }
03522                         if (candidate < n_clusters) {
03523                             // We have found a valid candidate: we can delete
03524                             // this cluster.
03525                             // Note that the samples in this cluster will not
03526                             // necessarily end up assigned to the candidate
03527                             // template.
03528                             n_assigned[candidate] += n_assigned[j];
03529                             n_clusters--;
03530                             for (int k = j; k < n_clusters; k++) {
03531                                 n_assigned[k] = n_assigned[k + 1];
03532                                 missing_template(k) << missing_template(k + 1);
03533                             }
03534                             n_assigned.resize(n_clusters);
03535                             missing_template.resize(n_clusters,
03536                                     missing_template.width());
03537                             n_diffs.resize(n_clusters);
03538                             majority.resize(n_clusters, majority.width());
03539                             if (verbosity >= 10)
03540                                 pout << "Cluster " << j << " deleted (too"
03541                                     << " few samples assigned), there are"
03542                                     << " now " << n_clusters <<
03543                                     " clusters." << endl;
03544                         } else {
03545                             // No valid candidate: we reset this cluster
03546                             // randomly.
03547                             int random_pattern =
03548                                 PRandom::common(false)->uniform_multinomial_sample(
03549                                     missing_patterns.length());
03550                             missing_template(j) <<
03551                                 missing_patterns(random_pattern);
03552                             missing_assign[random_pattern] = j;
03553                             if (verbosity >= 10)
03554                                 pout << "Cluster " << j << " has been reset to"
03555                                      << " a random new center" << endl;
03556                         }
03557                         finished = false;
03558                     } else if (n_assigned[j] == -1) {
03559                         // Note: this case happens only for a newly created
03560                         // center (when we split a cluster in two).
03561                         finished = false;
03562                     }
03563                 }
03564 
03565                 count_iter++;
03566                 if (report_progress)
03567                     pb->update(count_iter);
03568             }
03569             if (finished && verbosity >= 2)
03570                 pout << "K-median stopped after only " << count_iter
03571                      << " iterations" << endl;
03572 
03573             if (finished && verbosity >= 5)
03574                 pout << "Number of points in each cluster: " << n_assigned
03575                      << endl;
03576 
03577             // Because right now we only want to perform updates, we need to
03578             // make sure there will be no need for downdates.
03579             /* Actually we can do downdates now!
03580             for (int i = 0; i < missing_patterns.length(); i++) {
03581                 int assign = missing_assign[i];
03582                 for (int k = 0; k < missing_patterns.width(); k++)
03583                     if (missing_patterns(i, k))
03584                         missing_template(assign, k) = true;
03585             }
03586             */
03587 
03588             // Second step to fill 'sample_to_template'.
03589             for (int i = 0; i < sample_to_template.length(); i++)
03590                 sample_to_template[i] = missing_assign[sample_to_template[i]];
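            // (After this loop, 'sample_to_template[i]' holds the index of
            // the cluster / template assigned to sample i's missing pattern,
            // rather than the index of the pattern itself.)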
03591 
03592             // Fill in list for each cluster.
03593             TVec< TVec<int> > clusters(missing_template.length());
03594             for (int i = 0; i < missing_patterns.length(); i++)
03595                 clusters[missing_assign[i]].append(i);
03596 
03597             TVec<int> parent;
03598             // Fill in list for each sample.
03599             // TODO Note: clusters_samp and sample_to_template may not really
03600             // be useful.
03601             clusters_samp.resize(missing_template.length());
03602             for (int i = 0; i < clusters_samp.length(); i++)
03603                 clusters_samp[i].resize(0);
03604             for (int i = 0; i < train_set->length(); i++)
03605                 // clusters_samp[missing_assign[sample_to_template[i]]].append(i);
03606                 clusters_samp[sample_to_template[i]].append(i);
03607 
03608             if (efficient_missing == 1 || efficient_missing == 3) {
03609 #ifdef DIRECTED_HACK
03610                 typedef boost::adjacency_list < boost::vecS, boost::vecS,
03611                     boost::directedS,
03612                     boost::property<boost::vertex_distance_t, int>,
03613                     boost::property<boost::edge_weight_t, int > > DistGraph;
03614 #else
03615                 typedef boost::adjacency_list < boost::vecS, boost::vecS,
03616                     boost::undirectedS,
03617                     boost::property<boost::vertex_distance_t, int>,
03618                     boost::property<boost::edge_weight_t, int > > DistGraph;
03619 #endif
03620                 // TODO According to
03621                 // http://boost-consulting.com/boost/libs/graph/doc/adjacency_matrix.html
03622                 // we should be using adjacency_matrix instead!
03623                 // TODO Do I really need all these properties? (in particular
03624                 // the vertex property?)
03625                 typedef std::pair<int, int> Edge;
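                // Illustrative sketch (disabled): minimal use of
                // boost::prim_minimum_spanning_tree on a tiny 3-vertex graph,
                // mirroring how 'DistGraph' is used below for each cluster of
                // missing patterns. The 'toy_*' names and values are made up
                // for illustration only and are not part of the training
                // code.
#if 0
                {
                    Edge toy_edges[] = { Edge(0, 1), Edge(1, 2), Edge(0, 2) };
                    int toy_weights[] = { 1, 2, 4 };
                    DistGraph toy_graph(toy_edges, toy_edges + 3,
                                        toy_weights, 3);
                    vector < boost::graph_traits <
                        DistGraph >::vertex_descriptor >
                            toy_pred(boost::num_vertices(toy_graph));
                    // After this call, toy_pred[v] is the parent of vertex v
                    // in the minimum spanning tree and the root is its own
                    // parent: here the tree keeps edges (0,1) and (1,2), for
                    // a total weight of 3.
                    boost::prim_minimum_spanning_tree(toy_graph, &toy_pred[0]);
                }
#endif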
03626 
03627                 spanning_path.resize(missing_template.length());
03628                 spanning_use_previous.resize(missing_template.length());
03629                 spanning_can_free.resize(missing_template.length());
03630                 for (int tpl = 0; tpl < missing_template.length(); tpl++) {
03631                 // Find minimum spanning tree of the missing patterns' graph.
03632                 TVec<int> cluster_tpl = clusters[tpl];
03633                 int n = cluster_tpl.length();
03634                 n = (n * (n - 1)) / 2;
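                // 'n' is now the number of unordered pairs of patterns in the
                // cluster, i.e. the number of edges of the complete graph
                // built just below (doubled further down if DIRECTED_HACK is
                // defined).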
03635                 if (report_progress && verbosity >= 2)
03636                     pb = new ProgressBar("Building graph of missing patterns",
03637                                          n);
03638 #ifdef DIRECTED_HACK
03639                 n *= 2;
03640 #endif
03641                 TVec<int> weights(n);
03642                 TVec<Edge> edges(n);
03643                 weights.resize(0);
03644                 edges.resize(0);
03645                 int progress = 0;
03646                 /*
03647                 PStream out = openFile("/u/delallea/tmp/edges.amat",
03648                         PStream::raw_ascii, "w");
03649                         */
03650                 for (int i = 0; i < cluster_tpl.length(); i++) {
03651                     for (int j = i + 1; j < cluster_tpl.length(); j++) {
03652                         edges.append( Edge(i, j) );
03653 #ifdef DIRECTED_HACK
03654                         edges.append( Edge(j, i) );
03655 #endif
03656                         int w = 0;
03657 #ifdef DIRECTED_HACK
03658                         int w_minus = 0;
03659 #endif
03660                         bool* missing_i = missing_patterns[cluster_tpl[i]];
03661                         bool* missing_j = missing_patterns[cluster_tpl[j]];
03662                         for (int k = 0; k < missing_patterns.width(); k++) {
03663                             if (*missing_i != *missing_j)
03664 #ifdef DIRECTED_HACK
03665                                 if (*missing_j)
03666                                     w++;
03667                                 else
03668                                     w_minus++;
03669 #else
03670                                 w++;
03671 #endif
03672                             missing_i++;
03673                             missing_j++;
03674                         }
03675 #ifdef DIRECTED_HACK
03676                         weights.append(10 * w + w_minus);
03677                         weights.append(w + 10 * w_minus);
03678 #else
03679                         weights.append(w);
03680 #endif
03681                         /*
03682                         out << "E(" << i << ", " << j << "), ";
03683                         out << w << ", ";
03684                         */
03685                     }
03686                     progress += cluster_tpl.length() - i - 1;
03687                     if (pb)
03688                         pb->update(progress);
03689                 }
03690                 // out.flush();
03691                 parent.resize(0);
03692                 if (edges.isEmpty()) {
03693                     parent.resize(1);
03694                     parent[0] = 0;
03695                 } else {
03696                 Edge* edges_ptr = edges.data();
03697                 DistGraph dist_graph(
03698                         edges_ptr,
03699                         edges_ptr + edges.length(),
03700                         weights.data(), cluster_tpl.length());
03701                 // boost::property_map<DistGraph, boost::edge_weight_t>::type
03702                 //    weightmap = boost::get(boost::edge_weight, dist_graph);
03703                 typedef vector < boost::graph_traits <
03704                                     DistGraph >::vertex_descriptor > Predec;
03705                 Predec pred(boost::num_vertices(dist_graph));
03706                 if (verbosity >= 2)
03707                     pout << "Computing minimum spanning tree... " << flush;
03708                 boost::prim_minimum_spanning_tree(dist_graph, &pred[0]);
03709                 if (verbosity >= 2)
03710                     pout << "Done" << endl;
03711                 // Convert 'pred' to a PLearn parent vector.
03712                 parent.resize(int(pred.size()));
03713                 for (std::size_t i = 0; i != pred.size(); i++)
03714                     parent[int(i)] = int(pred[i]);
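                // With Boost's Prim implementation, the root of the spanning
                // tree (and any unreachable vertex) is its own predecessor,
                // so parent[i] == i identifies the root; the code below
                // relies on this convention.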
03715 
03716                 /*
03717                 // Code to save the graph to display it in Matlab.
03718                 out = openFile("/u/delallea/tmp/tree.amat",
03719                         PStream::raw_ascii, "w");
03720                 for (int i = 0; i < parent.length(); i++)
03721                     if (parent[i] != i)
03722                         out << parent[i] + 1 << " ";
03723                     else
03724                         out << 0 << " ";
03725 
03726                 out = openFile("/u/delallea/tmp/weight.amat",
03727                         PStream::raw_ascii, "w");
03728                 for (int i = 0; i < parent.length(); i++) {
03729                     int j = parent[i];
03730                     // Looking for weight between nodes i and j.
03731                     int w = 0;
03732                     bool* missing_i = missing_patterns[cluster_tpl[i]];
03733                     bool* missing_j = missing_patterns[cluster_tpl[j]];
03734                     for (int k = 0; k < missing_patterns.width(); k++) {
03735                         if (*missing_i != *missing_j)
03736                             w++;
03737                         missing_i++;
03738                         missing_j++;
03739                     }
03740                     out << w << " ";
03741                 }
03742                 */
03743                 // Free memory used by weights and edges.
03744                 weights = TVec<int>();
03745                 edges = TVec<Edge>();
03746 
03747                 }
03748 #if 0
03749                 Mat parent_mat(1, parent.length());
03750                 for (int p = 0; p < parent.length(); p++)
03751                     parent_mat(0, p) = parent[p];
03752                 VMat parent_vm(parent_mat);
03753                 parent_vm->saveAMAT("/u/delallea/tmp/parent.amat", false,
03754                         true);
03755                 // Easy verification of cost.
03756                 int sum_add = 0;
03757                 int sum_min = 0;
03758                 for (int q = 0; q < parent.length(); q++) {
03759                     if (parent[q] == q)
03760                         continue;
03761                     TVec<bool> v1 = missing_patterns(q);
03762                     TVec<bool> v2 = missing_patterns(parent[q]);
03763                     for (int r = 0; r < v1.length(); r++) {
03764                         if (v1[r] && !v2[r])
03765                             sum_add++;
03766                         else if (!v1[r] && v2[r])
03767                             sum_min++;
03768                     }
03769                 }
03770                 pout << "Easy check: " << sum_add << " and " << sum_min <<
03771                     endl;
03772 #endif
03773 
03774                 n = cluster_tpl.length();
03775 #ifdef DIRECTED_HACK
03776 #else
03777                 // Compute list of nodes, from top to bottom.
03778                 TVec<int> top_to_bottom;
03779                 TVec<int> status(n, 0);
03780                 PLASSERT( parent.length() == n );
03781                 // Status: 0 = still has a parent
03782                 //         1 = candidate with no parent
03783                 //         2 = done
03784                 TVec< TVec<int> > children(n);
03785                 for (int i = 0; i < parent.length(); i++)
03786                     if (parent[i] != i)
03787                         children[ parent[i] ].append(i);
03788                     else
03789                         status[int(i)] = 1;
03790                 // Ensure there is only a single root in the resulting tree.
03791                 PLASSERT( status.find(1, status.find(1) + 1) == -1 );
03792                 int count = 0;
03793                 // Now we're ready to loop over all elements.
03794                 while (true) {
03795                     int last_count = count;
03796                     bool loop = false;
03797                     // Find the next candidate with no parent.
03798                     while (status[count] != 1 &&
03799                            (!loop || count != last_count)) {
03800                         count++;
03801                         if (count >= n) {
03802                             count -= n;
03803                             loop = true;
03804                         }
03805                     }
03806                     if (count == last_count && loop) {
03807                         // We must have gone through all nodes.
03808                         PLASSERT( status.find(0) == -1 );
03809                         break;
03810                     }
03811                     status[count] = 2;
03812                     top_to_bottom.append(count);
03813                     TVec<int> child = children[count];
03814                     for (int i = 0; i < child.length(); i++) {
03815                         int j = child[i];
03816                         PLASSERT( status[j] == 0 );
03817                         status[j] = 1;
03818                     }
03819                 }
03820 
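                // Informally, the two message passes below compute, for every
                // node, how many covariance matrices need to be kept cached
                // while traversing the subtree hanging below it
                // ('message_up') and the rest of the tree above it
                // ('message_down'): a node needs one more slot than its
                // neighbours only when at least two of them tie for the
                // maximum, much like a Sethi-Ullman / Strahler-number
                // computation on the spanning tree.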
03821                 // Initialize messages.
03822                 TVec<int> message_up(n, 0);
03823                 TVec<int> message_down(n, 0);
03824 
03825                 // Upward pass of messages.
03826                 for (int i = n - 1; i >= 0; i--) {
03827                     int k = top_to_bottom[i];
03828                     TVec<int> child = children[k];
03829                     if (child.isEmpty())
03830                         // Leaf node.
03831                         continue;
03832                     int max = -1;
03833                     bool balanced = false;
03834                     for (int j = 0; j < child.length(); j++) {
03835                         int msg_up = message_up[child[j]];
03836                         if (msg_up > max) {
03837                             max = msg_up;
03838                             balanced = false;
03839                         } else if (msg_up == max)
03840                             balanced = true;
03841                     }
03842                     if (balanced)
03843                         max++;
03844                     PLASSERT( max >= 0 );
03845                     message_up[k] = max;
03846                 }
03847 
03848                 // Downward pass of messages.
03849                 for (int q = 0; q < n; q++) {
03850                     int j = top_to_bottom[q];
03851                     int i = parent[j];
03852                     TVec<int> brothers = children[i];
03853                     int max = -1;
03854                     bool balanced = false;
03855                     for (int k = 0; k < brothers.length(); k++) {
03856                         int brother_k = brothers[k];
03857                         if (brother_k == j)
03858                             // We do not consider this node.
03859                             continue;
03860                         int msg_up = message_up[brother_k];
03861                         if (msg_up > max) {
03862                             max = msg_up;
03863                             balanced = false;
03864                         } else if (msg_up == max)
03865                             balanced = true;
03866                     }
03867                     int msg_down = message_down[i];
03868                     if (msg_down > max) {
03869                         max = msg_down;
03870                         balanced = false;
03871                     } else if (msg_down == max)
03872                         balanced = true;
03873                     if (balanced)
03874                         max++;
03875                     // Note that 'max' can be zero when there is only a
03876                     // single point.
03877                     PLASSERT( max > 0 || n == 1);
03878                     message_down[j] = max;
03879                 }
03880 
03881                 // Compute the cost.
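                // Informally, 'cost[i]' estimates how many covariance
                // matrices must be kept in memory if the traversal starts at
                // node i; the starting node chosen below minimizes this
                // count.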
03882                 TVec<int> cost(n, -1);
03883                 for (int i = 0; i < n; i++) {
03884                     int msg_up = message_up[i];
03885                     int msg_down = message_down[i];
03886                     if (msg_up == msg_down)
03887                         cost[i] = msg_up + 1;
03888                     else
03889                         cost[i] = max(msg_up, msg_down);
03890                 }
03891                 int min_cost = min(cost);
03892                 if (verbosity >= 5)
03893                     pout << "Minimum cost: " << min_cost << endl;
03894 
03895                 // Find the node to start from.
03896                 int start_node = argmin(cost);
03897                 PLASSERT( cost[start_node] == min_cost );
03898 #endif // DIRECTED_HACK
03899 
03900                 // Compute a node ordering giving rise to the minimum cost.
03901                 TVec<int>& span_path = spanning_path[tpl];
03902                 TVec<bool>& span_use_previous = spanning_use_previous[tpl];
03903                 TVec<bool>& span_can_free = spanning_can_free[tpl];
03904                 span_path.resize(0);
03905                 span_use_previous.resize(0);
03906                 span_can_free.resize(0);
03907                 // Note: 'free_previous' is set to 'false', meaning we might be
03908                 // using one more matrix than necessary. TODO Investigate
03909                 // exactly how this should be done.
03910 #ifdef DIRECTED_HACK
03911                 // Compute list of nodes, in the order they will be visited in
03912                 // the optimization process. Note that this may not be optimal
03913                 // memory-wise.
03914                 TVec< TVec<int> > children(n);
03915                 // First find the root and fill the children lists.
03916                 int root = -1;
03917                 for (int i = 0; i < parent.length(); i++)
03918                     if (parent[i] == i)
03919                         root = i;
03920                     else
03921                         children[ parent[i] ].append(i);
03922                 PLASSERT( root >= 0 );
03923                 // Then deduce the ordered list of nodes.
03924                 create_list(parent, children, span_path, span_use_previous,
03925                             span_can_free, root, true, false);
03926 #else
03927                 traverse_tree(span_path, span_can_free, span_use_previous,
03928                               false, true, start_node, -1, parent,
03929                               children, message_up, message_down);
03930 #endif
03931                 PLASSERT( span_path.length()          == n );
03932                 PLASSERT( span_can_free.length()      == n );
03933                 PLASSERT( span_use_previous.length()  == n );
03934                 // At this point the indices in 'span_path' are indices within
03935                 // the cluster 'tpl': we replace them with the global indices
03936                 // of the corresponding missing patterns.
03937                 for (int i = 0; i < span_path.length(); i++)
03938                     span_path[i] = cluster_tpl[span_path[i]];
03939 
03940                 // Consistency check: compute the average distance from one
03941                 // node to the next in the path.
03942                 int sum = 0;
03943                 int counter = 0;
03944                 Vec stats_diff(missing_patterns.width() + 1);
03945                 stats_diff.fill(0);
03946                 for (int i = 0; i < span_path.length() - 1; i++) {
03947                     int first = span_path[i];
03948                     int next = span_path[i + 1];
03949                     int dist = 0;
03950                     for (int k = 0; k < missing_patterns.width(); k++)
03951                         if (missing_patterns(first, k) !=
03952                             missing_patterns(next, k))
03953                             dist++;
03954                     sum += dist;
03955                     counter ++;
03956                     stats_diff[dist]++;
03957                 }
03958                 real avg_dist = 0;
03959                 if (counter > 0)
03960                     avg_dist = sum / real(counter);
03961                 // TODO Note that the quantity below is not exactly what we're
03962                 // interested in: it does not take into account the fact that
03963                 // we come back in the tree (branch switching).
03964                 if (verbosity >= 5)
03965                     pout << "Average distance to next pattern: " << avg_dist
03966                          << endl;
03967                 /*
03968                 Mat tomat = stats_diff.toMat(stats_diff.length(), 1);
03969                 VMat save_vmat(tomat);
03970                 save_vmat->saveAMAT("/u/delallea/tmp/span_" +
03971                         tostring(efficient_k_median) + ".amat", true, true);
03972                         */
03973 
03974                 }
03975                 // Transform 'spanning_path' to obtain a path through samples,
03976                 // instead of a path through missing patterns.
03977                 // First get the list of samples associated with each missing
03978                 // pattern.
03979                 TVec<int> the_path;
03980                 TVec<bool> the_can_free;
03981                 TVec<bool> the_use_prev;
03982                 sample_to_path_index.resize(train_set->length());
03983                 sample_to_path_index.fill(-1);
03984                 for (int i = 0; i < spanning_path.length(); i++) {
03985                     TVec<int>& span_path = spanning_path[i];
03986                     TVec<bool>& span_can_free = spanning_can_free[i];
03987                     TVec<bool>& span_use_prev = spanning_use_previous[i];
03988 
03989                     the_path.resize(span_path.length());
03990                     the_can_free.resize(span_can_free.length());
03991                     the_use_prev.resize(span_use_prev.length());
03992                     the_path     << span_path;
03993                     the_can_free << span_can_free;
03994                     the_use_prev << span_use_prev;
03995                     span_path.resize(0);
03996                     span_can_free.resize(0);
03997                     span_use_prev.resize(0);
03998                     int count = 0;
03999                     for (int j = 0; j < the_path.length(); j++) {
04000                         const TVec<int>& samples_list =
04001                             pattern_to_samples[the_path[j]];
04002                         span_path.append(samples_list);
04003                         span_can_free.append(the_can_free[j]);
04004                         span_use_prev.append(the_use_prev[j]);
04005                         for (int k = 0; k < samples_list.length(); k++) {
04006                             PLASSERT(sample_to_path_index[samples_list[k]]==-1);
04007                             sample_to_path_index[samples_list[k]] = count;
04008                             count++;
04009                             // Other samples with same pattern will reuse the
04010                             // same covariance matrix. However, right now, it
04011                             // is not completely efficient since the matrix
04012                             // will still be copied.
04013                             if (k > 0) {
04014                                 span_can_free.append(true);
04015                                 span_use_prev.append(true);
04016                             }
04017                         }
04018                     }
04019 #ifdef BOUNDCHECK
04020                     int n_samples_in_cluster = clusters_samp[i].length();
04021                     PLASSERT( span_path.length()      == n_samples_in_cluster );
04022                     PLASSERT( span_can_free.length()  == n_samples_in_cluster );
04023                     PLASSERT( span_use_prev.length()  == n_samples_in_cluster );
04024 #endif
04025                 }
04026                 // Make sure all samples belong to a path.
04027                 PLASSERT( sample_to_path_index.find(-1) == -1 );
04028             }
04029 
04030             // Compute some statistics on the distances to templates.
04031 #if 0
04032             Vec current_vec, previous_vec;
04033             Vec count_added(10000, real(0));
04034             Vec count_removed(10000, real(0));
04035             int max_added = 0;
04036             int max_removed = 0;
04037             int sum_added = 0;
04038             int sum_removed = 0;
04039             int counter_added = 0;
04040             int counter_removed = 0;
04041             map<int, int> current_to_previous;
04042             TVec<int> is_there(train_set->length(), 0);
04043             for (int i = 0; i < spanning_path.length(); i++) {
04044                 TVec<int>& span_path = spanning_path[i];
04045                 TVec<bool>& span_use_prev = spanning_use_previous[i];
04046                 TVec<bool>& span_can_free = spanning_can_free[i];
04047                 TVec<int> cached_nodes;
04048                 cached_nodes.append(0);
04049                 int queue_index = 0;
04050                 for (int k = 1; k < span_path.length(); k++) {
04051                     if (span_use_prev[k])
04052                         queue_index = cached_nodes.length() - 1;
04053                     else
04054                         queue_index = cached_nodes.length() - 2;
04055                     int previous = cached_nodes[queue_index];
04056                     int index_current = span_path[k];
04057                     train_set->getExample(index_current,
04058                                           input, target, weight);
04059                     current_vec.resize(input.length());
04060                     current_vec << input;
04061 
04062                     int index_previous = span_path[previous];
04063                     train_set->getExample(index_previous, input,
04064                                           target, weight);
04065                     previous_vec.resize(input.length());
04066                     previous_vec << input;
04067                     is_there[index_current] = 1;
04068                     is_there[index_previous] = 1;
04069                     int current_pattern = sample_to_pattern[index_current];
04070                     int previous_pattern = sample_to_pattern[index_previous];
04071                     if (current_pattern          == previous_pattern ||
04072                         parent[current_pattern]  == previous_pattern ||
04073                         parent[previous_pattern] == current_pattern)
04074                     {} else
04075                     {
04076                         PLERROR("Houston, we have a problem!");
04077                     }
04078                     if (current_pattern != previous_pattern)
04079                         current_to_previous[index_current] = index_previous;
04080                     int n_added = 0;
04081                     int n_removed = 0;
04082                     for (int q = 0; q < input.length(); q++) {
04083                         if (is_missing(current_vec[q])) {
04084                             if (!missing_patterns(current_pattern, q))
04085                                 PLERROR("No way!");
04086                         } else if (missing_patterns(current_pattern, q))
04087                             PLERROR("Way no!");
04088                             
04089                         if (is_missing(previous_vec[q]) &&
04090                             !is_missing(current_vec[q]))
04091                             n_added++;
04092                         else if (!is_missing(previous_vec[q]) &&
04093                                  is_missing(current_vec[q]))
04094                             n_removed++;
04095                     }
04096                     count_added[n_added]++;
04097                     count_removed[n_removed]++;
04098                     sum_added += n_added;
04099                     sum_removed += n_removed;
04100                     counter_added++;
04101                     counter_removed++;
04102                     if (n_added > max_added)
04103                         max_added = n_added;
04104                     if (n_removed > max_removed)
04105                         max_removed = n_removed;
04106                     if (span_can_free[k])
04107                         cached_nodes.resize(queue_index);
04108                     else if (!span_use_prev[k])
04109                         cached_nodes.resize(cached_nodes.length() - 1);
04110                     cached_nodes.append(k);
04111                 }
04112             }
04113             if (is_there.find(0) != -1)
04114                 PLERROR("OMG!");
04115             pout << "Mean added  : " << sum_added << "/" << counter_added << " = "
04116                 << sum_added / real(counter_added) << endl;
04117             pout << "Mean removed: " << sum_removed << "/" << counter_removed << " = "
04118                 << sum_removed / real(counter_removed) << endl;
04119 
04120             Mat cur_to_prev(current_to_previous.size(), 2);
04121             map<int, int>::const_iterator it = current_to_previous.begin();
04122             int count_i = 0;
04123             for (; it != current_to_previous.end(); it++, count_i++) {
04124                 if (it->first < it->second) {
04125                     cur_to_prev(count_i, 0) = it->first;
04126                     cur_to_prev(count_i, 1) = it->second;
04127                 } else {
04128                     cur_to_prev(count_i, 0) = it->second;
04129                     cur_to_prev(count_i, 1) = it->first;
04130                 }
04131             }
04132             PP<SortRowsVMatrix> cur_to_prev_vm = new SortRowsVMatrix();
04133             cur_to_prev_vm->source = VMat(cur_to_prev);
04134             cur_to_prev_vm->sort_columns = TVec<int>(0, 1, 1);
04135             cur_to_prev_vm->build();
04136             cur_to_prev_vm->saveAMAT("/u/delallea/tmp/cur_to_prev.amat",
04137                     false, true);
04138 
04139             count_added.resize(max_added + 1);
04140             count_removed.resize(max_removed + 1);
04141             Mat added_mat = count_added.toMat(1, count_added.length());
04142             Mat removed_mat = count_removed.toMat(1, count_removed.length());
04143             VMat(added_mat)->saveAMAT("/u/delallea/tmp/added.amat", false,
04144                     true);
04145             VMat(removed_mat)->saveAMAT("/u/delallea/tmp/removed.amat", false,
04146                     true);
04147 
04148             /*
04149             Vec stats_diff(missing_patterns.width());
04150             stats_diff.fill(0);
04151             for (int i = 0; i < missing_patterns.length(); i++) {
04152                 int assign = missing_assign[i];
04153                 int n_diffs = 0;
04154                 for (int k = 0; k < missing_patterns.width(); k++)
04155                     if (missing_patterns(i, k) != missing_template(assign, k))
04156                         n_diffs++;
04157                 stats_diff[n_diffs]++;
04158             }
04159             Mat tomat = stats_diff.toMat(stats_diff.length(), 1);
04160             VMat save_vmat(tomat);
04161             save_vmat->saveAMAT("/u/delallea/tmp/save_" +
04162                     tostring(efficient_k_median) + ".amat", true, true);
04163             stats_diff.resize(missing_template.length());
04164             stats_diff.fill(0);
04165             for (int i = 0; i < missing_patterns.length(); i++) {
04166                 stats_diff[missing_assign[i]]++;
04167             }
04168             tomat = stats_diff.toMat(stats_diff.length(), 1);
04169             save_vmat = VMat(tomat);
04170             save_vmat->saveAMAT("/u/delallea/tmp/clust_" +
04171                     tostring(efficient_k_median) + ".amat", true, true);
04172             Mat dist_mat(missing_template.length(),
04173                          missing_template.length());
04174             for (int i = 0; i < missing_template.length(); i++) {
04175                 for (int j = 0; j < missing_template.length(); j++) {
04176                     int n_diffs = 0;
04177                     for (int k = 0; k < missing_template.width(); k++)
04178                         if (missing_template(i, k) != missing_template(j, k))
04179                             n_diffs++;
04180                     dist_mat(i, j) = n_diffs;
04181                 }
04182             }
04183             save_vmat = VMat(dist_mat);
04184             save_vmat->saveAMAT("/u/delallea/tmp/dist_" +
04185                     tostring(efficient_k_median) + ".amat", true, true);
04186                     */
04187 #endif
04188         }
04189 
04190         // n_tries.resize(0); Old code, may be removed in the future...
04191         resizeDataBeforeTraining();
04192 
04193         // Get sample weights.
04194         if (train_set->weightsize() <= 0)
04195             initial_weights.fill(1);
04196         else
04197             getInitialWeightsFrom(train_set);
04198 
04199         // Perform K-means to initialize the centers of the mixture.
04200         TVec<int> clust_idx;  // Store the cluster index for each sample.
04201         kmeans(train_set, L, clust_idx, center, kmeans_iterations);
04202 
04203         // Initialize posteriors: P(j | s_i) = 0 if s_i is not in the j-th
04204         // cluster, and 1 otherwise.
04205         posteriors.fill(0);
04206         for (int i = 0; i < nsamples; i++)
04207             posteriors(i, clust_idx[i]) = 1;
04208 
04209         // Initialize everything from the K-Means clustering result.
04210         updateSampleWeights();
04211         computeMixtureWeights(false);
04212         computeMeansAndCovariances();
04213         precomputeAllGaussianLogCoefficients();
04214         /*
04215         Mat alpha_m(alpha.toMat(1, alpha.length()));
04216         VMat alpha_vm(alpha_m);
04217         alpha_vm->saveAMAT("/u/delallea/tmp/alpha.amat", false, true);
04218         VMat center_vm(center);
04219         center_vm->saveAMAT("/u/delallea/tmp/center.amat", false, true);
04220         PLASSERT(eigenvalues.width() == D);
04221         for (int j = 0; j < L; j++) {
04222             Vec eigenvals = eigenvalues(j);
04223             Mat& eigenvecs = eigenvectors[j];
04224             Mat covar(D, D);
04225             covar.fill(0);
04226             for (int k = 0; k < D; k++)
04227                 externalProductScaleAcc(covar, eigenvecs(k), eigenvecs(k),
04228                         eigenvals[k]);
04229             VMat covar_vm(covar);
04230             string filename = "/u/delallea/tmp/covar_" + tostring(j) + ".amat";
04231             covar_vm->saveAMAT(filename, false, true);
04232         }
04233         */
04234         ptimer->stopTimer("init_time");
04235     }
04236 
04237     PP<ProgressBar> pb;
04238     int n_steps = nstages - stage;
04239     if (report_progress)
04240         pb = new ProgressBar("Training GaussMix", n_steps);
04241 
04242     /*
04243     TVec<Mat> save_center;
04244     save_center.resize(L);
04245     for (int i = 0; i < save_center.length(); i++)
04246         save_center[i].resize(n_steps, D);
04247     */
04248     int count_step = 0;
04249 
04250     bool replaced_gaussian = false;
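    // Main EM loop: each stage performs an E-step (posteriors P(j | s_i)
    // recomputed from the current parameters, then sample weights updated)
    // followed by an M-step (mixture weights, means and covariances
    // re-estimated from the weighted data). If a Gaussian gets removed and
    // replaced during the weight update, the E-step is redone before the
    // parameters are re-estimated.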
04251     while (stage < nstages) {
04252         do {
04253             computePosteriors();
04254             updateSampleWeights();
04255             replaced_gaussian = computeMixtureWeights(true);
04256             // Note: for debugging purposes, 'true' may be replaced by 'false'
04257             // to ensure no Gaussian is removed.
04258         } while (replaced_gaussian);
04259         computeMeansAndCovariances();
04260         precomputeAllGaussianLogCoefficients();
04261         // for (int i = 0; i < save_center.length(); i++)
04262         //    save_center[i](count_step) << center(i);
04263         count_step++;
04264         stage++;
04265         if (report_progress)
04266             pb->update(n_steps - nstages + stage);
04267         /*
04268         if (verbosity >= 10)
04269             pout << "Highest eigenvalue: " << max(eigenvalues) << endl;
04270         */
04271     }
04272 
04273     // Restore original predictor and predicted sizes if necessary.
04274     if (need_restore_sizes) {
04275         setPredictorPredictedSizes(backup_predictor_size,
04276                                    backup_predicted_size);
04277         // Because the sizes have changed, some data may need to be resized
04278         // accordingly.
04279         resizeDataBeforeUsing();
04280     }
04281 
04282     /*
04283     for (int i = 0; i < save_center.length(); i++) {
04284         VMat vm(save_center[i]);
04285         vm->saveAMAT("save_center_" + tostring(i) + ".amat");
04286     }
04287     */
04288     ptimer->stopTimer("training_time");
04289     static Vec train_stats_update;
04290     train_stats_update.resize(2);
04291     train_stats_update[0] = ptimer->getTimer("init_time");
04292     train_stats_update[1] = ptimer->getTimer("training_time");
04293     train_stats->forget(); // Forget potential old total training time.
04294     train_stats->update(train_stats_update);
04295 }
04296 
04298 // traverse_tree //
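// Informal description: recursively builds, in 'path', the order in which the
// nodes of the spanning tree reachable from 'index_node' (coming from
// 'previous_node') should be visited. For each visited node,
// 'span_use_previous' records whether it can build on the covariance matrix
// of the node visited just before it, and 'span_can_free' whether the matrix
// it builds on can be discarded afterwards (no remaining branch needs it).
// Branches are explored by increasing message value, which tends to keep the
// number of matrices simultaneously cached small.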
04300 void GaussMix::traverse_tree(TVec<int>& path,
04301                              TVec<bool>& span_can_free,
04302                              TVec<bool>& span_use_previous,
04303                              bool free_previous,
04304                              bool use_previous,
04305                              int index_node, int previous_node,
04306                              const TVec<int>& parent,
04307                              const TVec< TVec<int> >& children,
04308                              const TVec<int>& message_up,
04309                              const TVec<int>& message_down)
04310 {
04311     TVec<int> candidates;
04312     TVec<int> messages;
04313     TVec<int> child = children[index_node];
04314     for (int i = 0; i < child.length(); i++)
04315         if (child[i] != previous_node)
04316             candidates.append(child[i]);
04317     for (int i = 0; i < candidates.length(); i++)
04318         messages.append(message_up[candidates[i]]);
04319     if (parent[index_node] != index_node &&
04320         parent[index_node] != previous_node)
04321     {
04322         candidates.append(parent[index_node]);
04323         messages.append(message_down[parent[index_node]]);
04324     }
04325 
04326     if (child.length() > 1000)
04327         PLWARNING("In GaussMix::traverse_tree - Should implement a faster "
04328                   "sorting algorithm");
04329 
04330     path.append(index_node);
04331     span_can_free.append(free_previous);
04332     span_use_previous.append(use_previous);
04333 
04334     for (int i = 0; i < candidates.length(); i++) {
04335         int arg_min = i;
04336         for (int j = i + 1; j < candidates.length(); j++)
04337             if (messages[j] < messages[arg_min])
04338                 arg_min = j;
04339         int tmp = messages[i];
04340         messages[i] = messages[arg_min];
04341         messages[arg_min] = tmp;
04342         tmp = candidates[i];
04343         candidates[i] = candidates[arg_min];
04344         candidates[arg_min] = tmp;
04345         int node = candidates[i];
04346         PLASSERT( node != index_node && node != previous_node );
04347         bool can_free = (i == candidates.length() - 1);
04348         bool can_use_previous = (i == 0);
04349         traverse_tree(path, span_can_free, span_use_previous, can_free,
04350                 can_use_previous, node, index_node, parent,
04351                 children, message_up, message_down);
04352     }
04353 }
04354 
04356 // unknownOutput //
04358 void GaussMix::unknownOutput(char def, const Vec& input, Vec& output, int& k) const {
04359     switch(def) {
04360     case 'p': // Log posteriors P(j | y).
04361     {
04362         output.resize(k + L);
04363         // Compute p(y | x).
04364         real log_p_y_x = log_density(predicted_part);
04365         // This also fills the vector 'log_likelihood_dens' with likelihoods p(y,j | x),
04366         // which is exactly what we need in order to compute the posteriors.
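        // Hence output[j + k] = log p(y, j | x) - log p(y | x)
        //                     = log P(j | y, x).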
04367         for (int j = 0; j < L; j++)
04368             output[j + k] = log_likelihood_dens[j] - log_p_y_x;
04369         k += L;
04370         break;
04371     }
04372     default:
04373         inherited::unknownOutput(def, input, output, k);
04374         break;
04375     }
04376 }
04377 
04379 // updateSampleWeights //
04381 void GaussMix::updateSampleWeights() {
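    // For each Gaussian j, the weight of sample i becomes
    // initial_weights[i] * P(j | s_i), i.e. its initial weight multiplied by
    // the posterior of Gaussian j for that sample.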
04382     for (int j = 0; j < L; j++) {
04383         updated_weights(j) << initial_weights;
04384         columnmatrix(updated_weights(j)) *= posteriors.column(j);
04385     }
04386 }
04387 
04389 // survival_fn //
04391 real GaussMix::survival_fn(const Vec& x) const
04392 {
04393     //PLERROR("survival_fn not implemented for GaussMix"); return 0.0;
04394     return MISSING_VALUE;
04395 }
04396 
04398 // cdf //
04400 real GaussMix::cdf(const Vec& x) const
04401 {
04402     //PLERROR("cdf not implemented for GaussMix"); return 0.0;
04403     return MISSING_VALUE;
04404 }
04405 
04407 // variance //
04409 void GaussMix::variance(Mat& cov) const
04410 {
04411     // TODO Variance could be at least implemented for L == 1.
04412     PLERROR("variance not implemented for GaussMix");
04413 }
04414 
04415 } // end of namespace PLearn
04416 
04417 
04418 /*
04419   Local Variables:
04420   mode:c++
04421   c-basic-offset:4
04422   c-file-style:"stroustrup"
04423   c-file-offsets:((innamespace . 0)(inline-open . 0))
04424   indent-tabs-mode:nil
04425   fill-column:79
04426   End:
04427 */
04428 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :