// -*- C++ -*-

// GaussianContinuum.cc
//
// Copyright (C) 2004 Yoshua Bengio & Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id: GaussianContinuum.cc 6508 2006-12-15 02:35:49Z lamblin $
   ******************************************************* */

// Authors: Yoshua Bengio & Martin Monperrus
#include "GaussianContinuum.h"
#include <plearn/vmat/LocalNeighborsDifferencesVMatrix.h>
#include <plearn/var/ProductVariable.h>
#include <plearn/var/PlusVariable.h>
#include <plearn/var/SoftplusVariable.h>
#include <plearn/var/VarRowsVariable.h>
#include <plearn/var/VarRowVariable.h>
#include <plearn/var/SourceVariable.h>
#include <plearn/var/Var_operators.h>
#include <plearn/vmat/ConcatColumnsVMatrix.h>
#include <plearn/math/random.h>
#include <plearn/var/SumOfVariable.h>
#include <plearn/var/TanhVariable.h>
#include <plearn/var/NllSemisphericalGaussianVariable.h>
#include <plearn/var/DiagonalizedFactorsProductVariable.h>
#include <plearn/math/plapack.h>
#include <plearn/var/ColumnSumVariable.h>
#include <plearn/vmat/VMat_basic_stats.h>
#include <plearn/vmat/ConcatRowsVMatrix.h>
#include <plearn/vmat/SubVMatrix.h>
#include <plearn/var/PDistributionVariable.h>
#include <plearn_learners/distributions/UniformDistribution.h>
#include <plearn_learners/distributions/GaussianDistribution.h>
#include <plearn/display/DisplayUtils.h>
#include <plearn/opt/GradientOptimizer.h>
#include <plearn/var/TransposeVariable.h>
#include <plearn/var/Var_utils.h>
#include <plearn/var/ConcatRowsVariable.h>
#include <plearn/var/RowSumVariable.h>
#include <plearn/var/NoBpropVariable.h>
#include <plearn/var/ReshapeVariable.h>
#include <plearn/var/SquareVariable.h>
#include <plearn/var/ExpVariable.h>
#include <plearn/io/load_and_save.h>
#include <plearn/vmat/VMat_computeNearestNeighbors.h>
#include <plearn/vmat/FractionSplitter.h>
#include <plearn/vmat/RepeatSplitter.h>
namespace PLearn {
using namespace std;

// The hidden-layer neurons correspond to hyperplanes. smartInitialization
// initializes these hyperplanes so that each one passes through points drawn
// at random from the train_set; this way we are sure to tile the space of
// points well. The constant c acts as a kind of anti weight decay:
// the larger c is, the larger the weights get and the more sharply the
// neurons cut the space.
Mat smartInitialization(VMat v, int n, real c, real regularization)
{
  int l = v->length();
  int w = v->width();

  Mat result(n,w);
  Mat temp(w,w);
  Vec b(w);
  b<<c;

  int i,j;

  for (i=0;i<n;++i)
  {
    temp.clear();
    for (j=0;j<w;++j)
    {
      v->getRow(uniform_multinomial_sample(l),temp(j));
    }
    // regularization avoids problems 1/ when the same index is drawn twice
    // 2/ when the points are too close to each other
    regularizeMatrix(temp,regularization);
    result(i) << solveLinearSystem(temp, b);
  }
  return result;
}
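
// Illustrative usage sketch (added for exposition; not part of the original
// file). The fill_random_uniform call is an assumed helper; the point is only
// to show the calling convention of smartInitialization.
#if 0
static void smartInitializationExample()
{
  Mat data(100, 5);                  // 100 points in 5 dimensions
  fill_random_uniform(data, -1, 1);  // hypothetical random fill
  VMat vm(data);
  // One hyperplane per row of 'init': row i solves temp * row_i = (c,...,c)'
  // for a random subset of training points, so the corresponding hyperplane
  // passes near those points.
  Mat init = smartInitialization(vm, 10 /*n*/, 1.0 /*c*/, 1e-6 /*regularization*/);
}
#endif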

GaussianContinuum::GaussianContinuum()
/* ### Initialize all fields to their default value here */
  : weight_mu_and_tangent(0), include_current_point(false), random_walk_step_prop(1),
    use_noise(false), use_noise_direction(false), noise(-1), noise_type("uniform"),
    n_random_walk_step(0), n_random_walk_per_point(0), save_image_mat(false),
    walk_on_noise(true), min_sigma(0.00001), min_diff(0.01), min_p_x(0.001),
    print_parameters(false), sm_bigger_than_sn(true), n_neighbors(5),
    n_neighbors_density(-1), mu_n_neighbors(2), n_dim(1),
    compute_cost_every_n_epochs(5), variances_transfer_function("softplus"),
    validation_prop(0), architecture_type("single_neural_network"),
    n_hidden_units(-1), batch_size(1), norm_penalization(0), svd_threshold(1e-5)
{
}
PLEARN_IMPLEMENT_OBJECT(GaussianContinuum, "Learns a continuous (uncountable) Gaussian mixture with non-local parametrization",
                        "This learner implicitly estimates the density of the data through\n"
                        "a generalization of the Gaussian mixture model and of the TangentLearner\n"
                        "algorithm (see help on that class). The density is the fixed point of\n"
                        "a random walk {z_t} that follows the following transition probabilities:\n"
                        "   z_{t+1} sampled from a Gaussian associated with z_t, centered\n"
                        "   at z_t + mu(z_t), with covariance matrix S(z_t).\n"
                        "The semantic of that random walk is the following (and that is how\n"
                        "it will be estimated). Given a point z_t, the sample z_{t+1} represents\n"
                        "a 'near neighbor' of z_t. We assume that the density is smooth enough\n"
                        "that the cloud of 'near neighbors' around z_t can be modeled by a Gaussian.\n"
                        "The functions mu(.) and S(.) have globally estimated parameters (for example\n"
                        "using neural nets or linear functions of x, or linear functions of a basis).\n"
                        "Here we suppose that the eigenvalues of S(.) come from two groups:\n"
                        "the first group should correspond to locally estimated principal\n"
                        "directions of variations and there are no constraints on these eigenvalues\n"
                        "(except that they are positive), while the second group should correspond\n"
                        "to 'noise' directions, that have all the same value sigma2_noise\n"
                        "i.e. it is not necessary to explicitly model the directions of variations\n"
                        "(the eigenvectors) associated with the second group. In general we expect\n"
                        "sigma2_noise to be small compared to the first group eigenvalues, which\n"
                        "means that the Gaussians are flat in the corresponding directions, and\n"
                        "that the first group variations correspond to modeling a manifold near\n"
                        "which most of the data lie. Optionally, an embedding corresponding\n"
                        "to variations associated with the first group of eigenvalues can be learnt\n"
                        "by choosing for the architecture_type option a value of the form embedding_*.\n"
                        "Although the density is not available in closed form, it is easy (but maybe slow)\n"
                        "to sample from it: pick one of the training examples at random and then\n"
                        "follow the random walk (ideally, a long time). It is also possible in\n"
                        "principle to obtain a numerical estimate of the density at a point x,\n"
                        "by sampling enough random walk points around x.\n"
                        );

/* MATHEMATICAL DETAILS

* Fixed point of the random walk is the density:

  Let p(Z_t) represent the density of the t-th random walk sample Z_t (a r.v.).
  To obtain p(Z_{t+1}) we sample Z_t from p(Z_t) and then sample Z_{t+1}
  from p(Z_{t+1}|Z_t), using the Gaussian with mean z_t + mu(z_t) and
  covariance matrix S(z_t). Thus p(Z_{t+1}=x) = \int_y p(Z_t=y) p(Z_{t+1}=x|Z_t=y) dy.
  Then at the fixed point we should have p(Z_t) = p(Z_{t+1}) = p(X), i.e.
    p(x) = \int_y p(y) p(x|y) dy
  which has the same form as a Gaussian mixture, with p(x|y) Gaussian in x,
  and the sum replaced by an integral (i.e. there is an uncountable 'number'
  of Gaussian components, one at each position y in space). It is possible
  to achieve this only because each Gaussian component p(x|y) has mean and variance that
  depend on y and on global parameters theta, and those parameters are estimated
  from data everywhere, and might generalize to new places.

* How to estimate the density numerically:

  Although the density cannot be computed exactly, it can be estimated
  using a Gaussian mixture with a finite number of components. Suppose that
  we have sampled a set R of random samples on the above random walks
  (including also the training data, which we know to come from the
  true density). Then we obtain a Monte-Carlo approximation of
  the above equation as follows:
    p(x) ~=~ average_t p(x|x_t)

  where x_t is in R (i.e. sampled from the distribution p(y)).  This is
  simply a uniformly weighted Gaussian mixture centered on the data points
  and on the random walk points. If we want to get a more precise estimator
  of p(x), we should sample points more often around x, but then correct
  this bias, using importance sampling. A simple way to do this is to
  choose more points to put in R in such a way as to give more
  preference to the near neighbors of x in R. Let q_x(x_t) be a discrete
  distribution over the elements of R which is non-zero everywhere but
  puts more weight on the neighbors of x. Then we create new samples,
  to be put in a set R', by performing random walks starting from
  points of R with probability q_x(x_t). The resulting estimator
  would be
    p(x) ~=~ average_{x_t in R'} p(x|x_t) / (q_x(x_t) |R'|).
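
  A minimal sketch of the unweighted estimator above (illustrative only;
  'gaussian_density' standing for p(x|x_t) is a hypothetical helper, not
  a PLearn function):

    real p_hat = 0;
    for (int t = 0; t < R.length(); t++)    // R: Mat of samples, one per row
      p_hat += gaussian_density(x, R(t));   // evaluates p(x | x_t)
    p_hat /= R.length();                    // p(x) ~=~ average_t p(x|x_t)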

* How to estimate mu(x) and S(x)?

  We propose to estimate mu(x) and S(x) by minimizing the negative
  log-likelihood of the neighbors x_j of each training point x_i,
  according to the Gaussian with mean x_i + mu(x_i) and covariance
  matrix S(x_i), plus possibly some regularization term, such
  as weight decay on the parameters of the functions. In this
  implementation training proceeds by stochastic gradient, visiting
  each example x_i (with all of its neighbors) and then making
  a parameter update.

* Parametrization of mu(x) and S(x):

  mu(x) is simply the output of a linear or neural-net function of x.
  S(x) is more difficult to parametrize. We consider two main solutions
  here: (1) semi-spherical (only two variances are considered: on the
  manifold and orthogonal to it), or (2) factor model with Cholesky
  decomposition for the manifold directions and a single shared variance
  for the directions orthogonal to the manifold. Note that we
  would prefer to parametrize S(x) in such a way as to make it
  easy to compute v'S(x)^{-1}v for any vector v, and log(det(S(x))).

  Consider the derivative of NLL == -log(p(y)) wrt log(p(y|x)):
    d(-log(p(y)))/d(log(p(y|x))) = -p(y|x)p(x)/p(y) = -p(x|y).
  (this also corresponds to the 'posterior' factor in EM).

  The conditional log-likelihood  log(p(y|x)) for a neighbor y
  of an example x is written
    log(p(y|x)) = -0.5(y-x-mu(x))'S(x)^{-1}(y-x-mu(x)) - 0.5*log(det(S(x))) - (n/2)log(2pi).

  Hence dNLL/dtheta is obtained from
    0.5 p(x|y) (d((y-x-mu(x))'S(x)^{-1}(y-x-mu(x)))/dtheta + d(log(det(S(x))))/dtheta)       (1)
  which gives significant weight only to the near neighbors y of x.

  The gradient wrt mu(x) is in particular
    dNLL/dmu(x) = p(x|y) S(x)^{-1} (mu(x)+x-y).

* Semi-spherical covariance model:

  The idea of the semi-spherical model is that we assume that the neighbors difference
  vector y-x has two components: (a) one along the tangent plane of the manifold, spanned
  by a set of vectors F_i(x), the rows of F(x), a matrix-valued unconstrained estimated function,
  and (b) one orthogonal to that tangent plane. We write z = y-x-mu(x) = z_m + z_n, with z_m the
  component on the manifold and z_n the noise component. Since we want z_n orthogonal
  to the tangent plane, we choose it such that F z_n = 0. Since z_m is in the span
  of the rows F_i of F, we can write it as a linear combination of these rows, with
  weights w_i. Let w=(w_1,...w_d), then z_m = F'w. To find w, it is enough to find
  the projection of y-x along the tangent plane, which corresponds to the shortest
  possible z_n. Minimizing the norm of z_n, equal to ||z-F'w||^2, yields the first order equation
      F(z-F'w) = 0
  i.e. the solution is
      w = (FF')^{-1} Fz.
  In practice, this will be done by using a singular value decomposition of F',
      F' = U D V'
  so w = V D^{-2} V' F z = V D^{-2} V' V D U' z = V D^{-1} U' z. Note that
  z_m' z_n = w'F (z - F'w) = 0, hence z_m is orthogonal to z_n.
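
  In the code, update_reference_set_parameters() below implements exactly
  this projection with lapackSVD; a sketch mirroring that routine (B is
  the d x n matrix such that w = B z):

    lapackSVD(F_copy, Ut_svd, S_svd, V_svd);   // SVD of F': F' = U D V'
    B.clear();
    for (int k = 0; k < S_svd.length(); k++)
      if (S_svd[k] > svd_threshold)            // drop near-zero singular values
        for (int i = 0; i < d; i++)
          for (int j = 0; j < n; j++)
            B(i,j) += V_svd(i,k) * Ut_svd(k,j) / S_svd[k];   // B = V D^{-1} U'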

  By our model, the covariance matrix can be decomposed in two parts,
    S(x) = sigma2_manifold U U'  + sigma2_noise N N'
  where M=[U | N] is the matrix whose columns are eigenvectors of S(x), with U the e-vectors
  along the manifold directions and N the e-vectors along the noise directions.
  It is easy to show that one does not need to explicitly represent the
  noise eigenvectors N, because both the columns of U and the columns of N
  are also eigenvectors of the identity matrix. Hence
   S(x) = (sigma2_manifold - sigma2_noise) U U' + sigma2_noise I,
  with I the nxn identity matrix.
  This can be shown by re-writing I = [U | N] [U | N]' and appropriate algebra.

  It is also easy to show that S(x)^{-1} z = (1/sigma2_manifold) z_m + (1/sigma2_noise) z_n,
  that the quadratic form is
     z' S(x)^{-1} z = (1/sigma2_manifold) ||z_m||^2 + (1/sigma2_noise) ||z_n||^2,          (2)
  and that
     log(det(S(x))) = d log(sigma2_manifold) + (n-d) log(sigma2_noise).                    (3)

  How to show the above:
    @ We have defined S(x) = M diag(s) M' where s is a vector whose first d elements are sigma2_manifold
    and last n-d elements are sigma2_noise, and M=[U | N] are the eigenvectors, or
      S(x) = sum_{i=1}^d sigma2_manifold U_i U_i' + sum_{i=d+1}^n sigma2_noise N_i N_i'
    where U_i is a column of U and N_i a column of N. Hence
      S(x) = sigma2_manifold sum_{i=1}^d U_i U_i' - sigma2_noise sum_{i=1}^d U_i U_i'
             + sigma2_noise (sum_{i=1}^d U_i U_i' + sum_{i=d+1}^n  N_i N_i')
           = (sigma2_manifold - sigma2_noise) sum_{i=1}^d U_i U_i' + sigma2_noise I
           = (sigma2_manifold - sigma2_noise) U U' + sigma2_noise I
    since sum_{i=1}^n M_i M_i' = M M' = I (since M is a square orthonormal matrix).

    @ S(x)^{-1} = M diag(s)^{-1} M' = (1/sigma2_manifold - 1/sigma2_noise) U U' + 1/sigma2_noise I
    using the same argument as above but replacing all sigma2* by 1/sigma2*.

    @ Hence S(x)^{-1} z = S(x)^{-1} (z_m + z_n)
                      = (1/sigma2_manifold - 1/sigma2_noise) z_m + 1/sigma2_noise (z_m + z_n)
                      = 1/sigma2_manifold z_m + 1/sigma2_noise z_n
    where on the second line we used the fact that U U' acts as the identity
    matrix for vectors spanned by the columns of U, which can be shown as follows.
    Let z_m = sum_i a_i U_i. Then U U' z_m = sum_i a_i U U' U_i = sum_i a_i U e_i = sum_i a_i U_i = z_m.

    @ Also, z' S(x)^{-1} z = (z_m + z_n) (1/sigma2_manifold z_m + 1/sigma2_noise z_n)
                         = 1/sigma2_manifold ||z_m||^2 + 1/sigma2_noise ||z_n||^2
    since by construction z_m . z_n = 0.

    @ Finally, log(det(S(x))) = sum_{i=1}^n log(s_i)
                              = sum_{i=1}^d log(sigma2_manifold) + sum_{i=d+1}^n log(sigma2_noise)
                              = d log(sigma2_manifold) + (n-d) log(sigma2_noise).
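
  Numerically, once z_m and z_n are available, eq.(2) and eq.(3) give the
  per-neighbor log-density directly; a sketch (illustrative names,
  mirroring the density code further below):

    real quadratic = pownorm(zm,2)/sigma2_manifold + pownorm(zn,2)/sigma2_noise; // eq.(2)
    real log_det   = d*log(sigma2_manifold) + (n-d)*log(sigma2_noise);           // eq.(3)
    real log_p     = -0.5*(quadratic + log_det) - n/2.0 * Log2Pi;                // log p(y|x)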


* Gradients on covariance for the semi-spherical model:

  We have already shown the gradient of NLL on mu(x) above. We need
  also here the gradient on sigma2_manifold, sigma2_noise, and F, all
  three of which are supposed to be functions of x (e.g. outputs of
  a neural network, so we need to provide the gradient on the output
  units of the neural network). Note that the sigma2's must be constrained
  to be positive (e.g. by squaring the output, using an exponential
  or softplus activation function).

    dNLL/dsigma2_manifold = 0.5 p(x|y) ( d/sigma2_manifold - ||z_m||^2/sigma2_manifold^2)

  N.B. this is the gradient on the variance, not on the standard deviation.

  Proof: Recall eq.(1) and let theta = sigma2_manifold. Using eq.(2) we obtain
  for the first term in (1):
    d/dsigma2_manifold (0.5/sigma2_manifold ||z_m||^2) = -0.5||z_m||^2/sigma2_manifold^2.
  Using (3) we obtain the second term
    d/dsigma2_manifold (0.5 d log(sigma2_manifold)) = 0.5 d/sigma2_manifold.

  The same arguments yield the following for the gradient on sigma2_noise:

    dNLL/dsigma2_noise = 0.5 p(x|y) ( (n-d)/sigma2_noise - ||z_n||^2/sigma2_noise^2)
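
  In code these two gradients are one line each; a sketch (illustrative
  variable names, with sm = sigma2_manifold and sn = sigma2_noise):

    real dnll_dsm = 0.5 * p_x_given_y * (d/sm     - pownorm(zm,2)/(sm*sm));
    real dnll_dsn = 0.5 * p_x_given_y * ((n-d)/sn - pownorm(zn,2)/(sn*sn));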


  Now let us consider the more difficult case of theta = F_{ij} (i in {1..d}, j in {1..n}).
  The result is fortunately simple to write:

    dNLL/dF = p(x|y) (1/sigma2_manifold - 1/sigma2_noise) w z_n'

  Proof: First we see that the second term in eq.(1) does not depend on F because of eq.(3).
  For the first term of eq.(1), we obtain using (2)
    d(0.5 z'S(x)^{-1} z)/dF_{ij}
      = d/dF_{ij} ((0.5/sigma2_manifold) ||z_m||^2 + (0.5/sigma2_noise) ||z_n||^2)
      = d/dF_{ij} ((0.5/sigma2_manifold) ||F'w||^2 + (0.5/sigma2_noise) ||z-F'w||^2)
      = (1/sigma2_manifold) (F'w)' d(F'w)/dF_{ij} + (1/sigma2_noise) z_n' d(z-F'w)/dF_{ij}
      = (1/sigma2_manifold) (F'w)' d(F'w)/dF_{ij} - (1/sigma2_noise) z_n' d(F'w)/dF_{ij}   (4)
  Note that w depends on F so we will have to compute two components:
    d(F'w)/dF_{ij} = w_i e_j + F' dw/dF_{ij}                                        (5)
  Now recall how w depends on F: w = (FF')^{-1} F z, and recall the identity
  d(A^{-1})/dx = -A^{-1} dA/dx A^{-1} for square matrix A. Hence
    dw/dF_{ij} = - (FF')^{-1} d(FF')/dF_{ij} (FF')^{-1} F z + (FF')^{-1} dF/dF_{ij} z
               = - (FF')^{-1} ( F e_j e_i' + e_i e_j' F') w + (FF')^{-1} e_i e_j' z
  where we have replaced (FF')^{-1}Fz by w in the last factor of the first term, and
  where e_i is the d-vector with all 0's except a 1 at position i, and e_j is the n-vector
  with all 0's except a 1 at position j. It is easy to see that dF/dF_{ij} = e_i e_j'
  which is the matrix with all 0's except at position (i,j). Then
    d(FF')/dF_{ij} = F (dF/dF_{ij})' + dF/dF_{ij} F' = F e_j e_i' + e_i e_j' F'.

  We are now ready to pop back and plug all these results together. First we plug
  the above in (5):
   d(F'w)/dF_{ij} = w_i e_j + F' (FF')^{-1} e_i e_j' z - F' (FF')^{-1} ( F e_j e_i' + e_i e_j' F') w
  then plug this back in (4) noting that FF' cancels with (FF')^{-1} everywhere in the sigma2_manifold term:
   d(0.5 z'S(x)^{-1} z)/dF_{ij} =
     (1/sigma2_manifold)  (w'F w_i e_j + w'e_i e_j' z - w'(F e_j e_i' + e_i e_j' F') w)
     - (1/sigma2_noise) w_i z_n'e_j
   using z_n'F' = 0 (and hence z_n'F'(FF')^{-1} = z_n'UD^{-1}V' = 0, since z_n is orthogonal to every column of U).
   Note: F'(FF')^{-1}F = UDV'(VD^{-2}V')VDU' = UU', and UU'z = UU'(z_m+z_n) = z_m to simplify last term.
   In the sigma2_manifold term let us use the facts that (a) each sub-term is a scalar, (b) tr(AB)=tr(BA),
   (c) scalar = scalar', and (d) e_i'A e_j = A_{ij} to write everything in matrix form:
       (1/sigma2_manifold)  (w'F e_j w_i  + w'e_i e_j' z - w'(F e_j e_i' + e_i e_j' F') w)
     = (1/sigma2_manifold)  (w'F e_j e_i' w + z'e_j e_i' w - w'F e_j e_i'w - z'UU'e_j e_i'w)
     = (1/sigma2_manifold)  (e_i'ww'F e_j + e_i'wz'e_j - e_i'ww'Fe_j - e_i'w z_m' e_j)
     = (1/sigma2_manifold)  (ww'F  + wz' - ww'F - w z_m')_{ij}
     = (1/sigma2_manifold)  (wz' - w z_m')_{ij}
     = (1/sigma2_manifold)  (w z_n')_{ij}
   Now let us do the sigma2_noise term:
       (1/sigma2_noise) w_i z_n'e_j = (1/sigma2_noise) e_i' w z_n'e_j = (1/sigma2_noise) (w z_n')_{ij}
   Putting the sigma2_manifold term and the sigma2_noise term together we obtain in matrix form
    d(0.5 z'S(x)^{-1} z)/dF = (1/sigma2_manifold) w z_n' - (1/sigma2_noise) w z_n'
   i.e. the final result
    d(0.5 z'S(x)^{-1} z)/dF = (1/sigma2_manifold - 1/sigma2_noise) w z_n'
   which gives (using dlog(det(S(x)))/dF = 0) the claimed statement:
    dNLL/dF = p(x|y) (1/sigma2_manifold - 1/sigma2_noise) w z_n'
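
   As a sketch, the corresponding update in code is just a scaled outer
   product (illustrative names; d = n_dim, sm = sigma2_manifold,
   sn = sigma2_noise):

     for (int i = 0; i < d; i++)
       for (int j = 0; j < n; j++)
         dNLL_dF(i,j) = p_x_given_y * (1/sm - 1/sn) * w[i] * zn[j];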

*/

void GaussianContinuum::declareOptions(OptionList& ol)
{
  // ### Declare all of this object's options here
  // ### For the "flags" of each option, you should typically specify
  // ### one of OptionBase::buildoption, OptionBase::learntoption or
  // ### OptionBase::tuningoption. Another possible flag to be combined with
  // ### is OptionBase::nosave

  declareOption(ol, "weight_mu_and_tangent", &GaussianContinuum::weight_mu_and_tangent, OptionBase::buildoption,
                "Weight of the cost on the scalar product between the manifold directions and mu.\n"
                );

  declareOption(ol, "include_current_point", &GaussianContinuum::include_current_point, OptionBase::buildoption,
                "Indication that the current point should be included in the nearest neighbors.\n"
                );

  declareOption(ol, "n_neighbors", &GaussianContinuum::n_neighbors, OptionBase::buildoption,
                "Number of nearest neighbors to consider for gradient descent.\n"
                );

  declareOption(ol, "n_neighbors_density", &GaussianContinuum::n_neighbors_density, OptionBase::buildoption,
                "Number of nearest neighbors to consider for p(x) density estimation.\n"
                );

  declareOption(ol, "mu_n_neighbors", &GaussianContinuum::mu_n_neighbors, OptionBase::buildoption,
                "Number of nearest neighbors to learn the mus (if < 0, mu_n_neighbors = n_neighbors).\n"
                );

  declareOption(ol, "n_dim", &GaussianContinuum::n_dim, OptionBase::buildoption,
                "Number of tangent vectors to predict.\n"
                );

  declareOption(ol, "compute_cost_every_n_epochs", &GaussianContinuum::compute_cost_every_n_epochs, OptionBase::buildoption,
                "Frequency of the computation of the cost on the training and validation set.\n"
                );

  declareOption(ol, "optimizer", &GaussianContinuum::optimizer, OptionBase::buildoption,
                "Optimizer that optimizes the cost function.\n"
                );

  declareOption(ol, "variances_transfer_function", &GaussianContinuum::variances_transfer_function,
                OptionBase::buildoption,
                "Type of output transfer function for predicted variances, to force them to be >0:\n"
                "  square : take the square\n"
                "  exp : apply the exponential\n"
                "  softplus : apply the function log(1+exp(.))\n"
                );

  declareOption(ol, "architecture_type", &GaussianContinuum::architecture_type, OptionBase::buildoption,
                "For pre-defined tangent_predictor types: \n"
                "   single_neural_network : prediction = b + W*tanh(c + V*x), where W has n_hidden_units columns,\n"
                "                           and the resulting vector is viewed as a n_dim by n matrix\n"
                "   embedding_neural_network: prediction[k,i] = d(e[k])/d(x[i]), where e(x) is an ordinary neural\n"
                "                             network representing the embedding function (see output_type option)\n"
                "where (b,W,c,V) are parameters to be optimized.\n"
                );

  declareOption(ol, "n_hidden_units", &GaussianContinuum::n_hidden_units, OptionBase::buildoption,
                "Number of hidden units (if architecture_type is some kind of neural network)\n"
                );
/*
  declareOption(ol, "output_type", &GaussianContinuum::output_type, OptionBase::buildoption,
                "Default value (the only one considered if architecture_type != embedding_*) is\n"
                "   tangent_plane: output the predicted tangent plane.\n"
                "   embedding: output the embedding vector (only if architecture_type == embedding_*).\n"
                "   tangent_plane+embedding: output both (in this order).\n"
                );
*/

  declareOption(ol, "batch_size", &GaussianContinuum::batch_size, OptionBase::buildoption,
                "    how many samples to use to estimate the average gradient before updating the weights\n"
                "    0 is equivalent to specifying training_set->length() \n");

  declareOption(ol, "svd_threshold", &GaussianContinuum::svd_threshold, OptionBase::buildoption,
                "Threshold to accept singular values of F in solving for linear combination weights on tangent subspace.\n"
                );

  declareOption(ol, "print_parameters", &GaussianContinuum::print_parameters, OptionBase::buildoption,
                "Indication that the parameters should be printed for the training set points.\n"
                );

  declareOption(ol, "sm_bigger_than_sn", &GaussianContinuum::sm_bigger_than_sn, OptionBase::buildoption,
                "Indication that sm should always be bigger than sn.\n"
                );

  declareOption(ol, "save_image_mat", &GaussianContinuum::save_image_mat, OptionBase::buildoption,
                "Indication that a matrix corresponding to the probabilities of the points on a 2d grid should be created.\n"
                );

  declareOption(ol, "walk_on_noise", &GaussianContinuum::walk_on_noise, OptionBase::buildoption,
                "Indication that the random walk should also consider the noise variation.\n"
                );

  declareOption(ol, "upper_y", &GaussianContinuum::upper_y, OptionBase::buildoption,
                "Upper bound on the y (second) coordinate.\n"
                );

  declareOption(ol, "upper_x", &GaussianContinuum::upper_x, OptionBase::buildoption,
                "Upper bound on the x (first) coordinate.\n"
                );

  declareOption(ol, "lower_y", &GaussianContinuum::lower_y, OptionBase::buildoption,
                "Lower bound on the y (second) coordinate.\n"
                );

  declareOption(ol, "lower_x", &GaussianContinuum::lower_x, OptionBase::buildoption,
                "Lower bound on the x (first) coordinate.\n"
                );

  declareOption(ol, "points_per_dim", &GaussianContinuum::points_per_dim, OptionBase::buildoption,
                "Number of points per dimension on the grid.\n"
                );

  declareOption(ol, "parameters", &GaussianContinuum::parameters, OptionBase::learntoption,
                "Parameters of the tangent_predictor function.\n"
                );

  declareOption(ol, "Bs", &GaussianContinuum::Bs, OptionBase::learntoption,
                "The B matrices for the training set.\n"
                );

  declareOption(ol, "Fs", &GaussianContinuum::Fs, OptionBase::learntoption,
                "The F (tangent planes) matrices for the training set.\n"
                );

  declareOption(ol, "mus", &GaussianContinuum::mus, OptionBase::learntoption,
                "The mu vectors for the training set.\n"
                );

  declareOption(ol, "sms", &GaussianContinuum::sms, OptionBase::learntoption,
                "The sm values for the training set.\n"
                );

  declareOption(ol, "sns", &GaussianContinuum::sns, OptionBase::learntoption,
                "The sn values for the training set.\n"
                );

  declareOption(ol, "min_sigma", &GaussianContinuum::min_sigma, OptionBase::buildoption,
                "The minimum value for sigma noise and manifold.\n"
                );

  declareOption(ol, "min_diff", &GaussianContinuum::min_diff, OptionBase::buildoption,
                "The minimum value for the difference between sigma manifold and noise.\n"
                );

  declareOption(ol, "min_p_x", &GaussianContinuum::min_p_x, OptionBase::buildoption,
                "The minimum value for p_x, for stability concerns when doing gradient descent.\n"
                );

  declareOption(ol, "n_random_walk_step", &GaussianContinuum::n_random_walk_step, OptionBase::buildoption,
                "The number of random walk steps.\n"
                );

  declareOption(ol, "n_random_walk_per_point", &GaussianContinuum::n_random_walk_per_point, OptionBase::buildoption,
                "The number of random walks per training set point.\n"
                );

  declareOption(ol, "noise", &GaussianContinuum::noise, OptionBase::buildoption,
                "Noise parameter for the training data.\n"
                );

  declareOption(ol, "noise_type", &GaussianContinuum::noise_type, OptionBase::buildoption,
                "Type of the noise (\"uniform\" or \"gaussian\").\n"
                );

  declareOption(ol, "use_noise", &GaussianContinuum::use_noise, OptionBase::buildoption,
                "Indication that the training should be done using noise on training data.\n"
                );

  declareOption(ol, "use_noise_direction", &GaussianContinuum::use_noise_direction, OptionBase::buildoption,
                "Indication that the noise should be directed in the noise directions.\n"
                );

  declareOption(ol, "random_walk_step_prop", &GaussianContinuum::random_walk_step_prop, OptionBase::buildoption,
                "Proportion or confidence of the random walk steps.\n"
                );

  declareOption(ol, "validation_prop", &GaussianContinuum::validation_prop, OptionBase::buildoption,
                "Proportion of points for the validation set (if incorrect value, valid_set == train_set).\n"
                );

  declareOption(ol, "reference_set", &GaussianContinuum::reference_set, OptionBase::learntoption,
                "Reference points for density computation.\n"
                );

  // Now call the parent class' declareOptions
  inherited::declareOptions(ol);
}
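
// Illustrative sketch (added for exposition): how these options might be set
// together. The object-specification syntax and values below are an
// assumption, not recommended settings:
//
//   GaussianContinuum(
//     n_neighbors = 10;
//     n_dim = 2;
//     n_hidden_units = 30;
//     architecture_type = "embedding_neural_network";
//     variances_transfer_function = "softplus";
//     optimizer = GradientOptimizer(start_learning_rate = 0.01);
//   )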

void GaussianContinuum::build_()
{

  n = PLearner::inputsize_;

  if (n>0)
  {
    Var log_n_examples(1,1,"log(n_examples)");


    {
      if (n_hidden_units <= 0)
        PLERROR("GaussianContinuum::Number of hidden units should be positive, now %d\n",n_hidden_units);

      if(validation_prop <= 0 || validation_prop >= 1) valid_set = train_set;
      else
      {
        // Making FractionSplitter
        PP<FractionSplitter> fsplit = new FractionSplitter();
        TMat<pair<real,real> > splits(1,2);
        splits(0,0).first = 0; splits(0,0).second = 1-validation_prop;
        splits(0,1).first = 1-validation_prop; splits(0,1).second = 1;
        fsplit->splits = splits;
        fsplit->build();

        // Making RepeatSplitter
        PP<RepeatSplitter> rsplit = new RepeatSplitter();
        rsplit->n = 1;
        rsplit->shuffle = true;
        rsplit->seed = 123456;
        rsplit->to_repeat = fsplit;
        rsplit->setDataSet(train_set);
        rsplit->build();

        TVec<VMat> vmat_splits = rsplit->getSplit();
        train_set = vmat_splits[0];
        valid_set = vmat_splits[1];

      }

      x = Var(n);
      c = Var(n_hidden_units,1,"c ");
      V = Var(n_hidden_units,n,"V ");
      Var a = tanh(c + product(V,x));
      muV = Var(n,n_hidden_units,"muV ");
      smV = Var(1,n_hidden_units,"smV ");
      smb = Var(1,1,"smB ");
      snV = Var(1,n_hidden_units,"snV ");
      snb = Var(1,1,"snB ");


      if(architecture_type == "embedding_neural_network")
      {
        W = Var(n_dim,n_hidden_units,"W ");
        tangent_plane = diagonalized_factors_product(W,1-a*a,V);
        embedding = product(W,a);
      }
      else if(architecture_type == "single_neural_network")
      {
        b = Var(n_dim*n,1,"b");
        W = Var(n_dim*n,n_hidden_units,"W ");
        tangent_plane = reshape(b + product(W,tanh(c + product(V,x))),n_dim,n);
      }
      else
        PLERROR("GaussianContinuum::build_, unknown architecture_type option %s",
                architecture_type.c_str());

      mu = product(muV,a);
      min_sig = new SourceVariable(1,1);
      min_sig->value[0] = min_sigma;
      min_sig->setName("min_sig");
      min_d = new SourceVariable(1,1);
      min_d->value[0] = min_diff;
      min_d->setName("min_d");

      if(noise > 0)
      {
        if(noise_type == "uniform")
        {
          PP<UniformDistribution> temp = new UniformDistribution();
          Vec lower_noise(n);
          Vec upper_noise(n);
          for(int i=0; i<n; i++)
          {
            lower_noise[i] = -1*noise;
            upper_noise[i] = noise;
          }
          temp->min = lower_noise;
          temp->max = upper_noise;
          dist = temp;
        }
        else if(noise_type == "gaussian")
        {
          PP<GaussianDistribution> temp = new GaussianDistribution();
          Vec mu(n); mu.clear();
          Vec eig_values(n);
          Mat eig_vectors(n,n); eig_vectors.clear();
          for(int i=0; i<n; i++)
          {
            eig_values[i] = noise; // maybe should be adjusted to the sigma noise at the input
            eig_vectors(i,i) = 1.0;
          }
          temp->mu = mu;
          temp->eigenvalues = eig_values;
          temp->eigenvectors = eig_vectors;
          dist = temp;
        }
        else PLERROR("In GaussianContinuum::build_() : noise_type %s not defined",noise_type.c_str());
        noise_var = new PDistributionVariable(x,dist);
        if(use_noise_direction)
        {
          for(int k=0; k<n_dim; k++)
          {
            Var index_var = new SourceVariable(1,1);
            index_var->value[0] = k;
            Var f_k = new VarRowVariable(tangent_plane,index_var);
            noise_var = noise_var - product(f_k,noise_var)* transpose(f_k)/pownorm(f_k,2);
          }
        }
        noise_var = no_bprop(noise_var);
        noise_var->setName(noise_type);
      }
      else
      {
        noise_var = new SourceVariable(n,1);
        noise_var->setName("no noise");
        for(int i=0; i<n; i++)
          noise_var->value[i] = 0;
      }


      // Path for noisy mu
      Var a_noisy = tanh(c + product(V,x+noise_var));
      mu_noisy = product(muV,a_noisy);

      if(sm_bigger_than_sn)
      {
        if(variances_transfer_function == "softplus") sn = softplus(snb + product(snV,a)) + min_sig;
        else if(variances_transfer_function == "square") sn = square(snb + product(snV,a)) + min_sig;
        else if(variances_transfer_function == "exp") sn = exp(snb + product(snV,a)) + min_sig;
        else PLERROR("In GaussianContinuum::build_ : unknown variances_transfer_function option %s ", variances_transfer_function.c_str());
        Var diff;

        if(variances_transfer_function == "softplus") diff = softplus(smb + product(smV,a)) + min_d;
        else if(variances_transfer_function == "square") diff = square(smb + product(smV,a)) + min_d;
        else if(variances_transfer_function == "exp") diff = exp(smb + product(smV,a)) + min_d;
        sm = sn + diff;
      }
      else
      {
        if(variances_transfer_function == "softplus"){
          sm = softplus(smb + product(smV,a)) + min_sig;
          sn = softplus(snb + product(snV,a)) + min_sig;
        }
        else if(variances_transfer_function == "square"){
          sm = square(smb + product(smV,a)) + min_sig;
          sn = square(snb + product(snV,a)) + min_sig;
        }
        else if(variances_transfer_function == "exp"){
          sm = exp(smb + product(smV,a)) + min_sig;
          sn = exp(snb + product(snV,a)) + min_sig;
        }
        else PLERROR("In GaussianContinuum::build_ : unknown variances_transfer_function option %s ", variances_transfer_function.c_str());
      }

      mu_noisy->setName("mu_noisy ");
      tangent_plane->setName("tangent_plane ");
      mu->setName("mu ");
      sm->setName("sm ");
      sn->setName("sn ");
      a_noisy->setName("a_noisy ");
      a->setName("a ");
      if(architecture_type == "embedding_neural_network")
        embedding->setName("embedding ");
      x->setName("x ");

      if(architecture_type == "embedding_neural_network")
        predictor = Func(x, W & c & V & muV & smV & smb & snV & snb, tangent_plane & mu & sm & sn);
      if(architecture_type == "single_neural_network")
        predictor = Func(x, b & W & c & V & muV & smV & smb & snV & snb, tangent_plane & mu & sm & sn);
      /*
      if (output_type=="tangent_plane")
        output_f = Func(x, tangent_plane);
      else if (output_type=="embedding")
      {
        if(architecture_type == "single_neural_network")
          PLERROR("Cannot obtain embedding with single_neural_network architecture");
        output_f = Func(x, embedding);
      }
      else if (output_type=="tangent_plane+embedding")
      {
        if(architecture_type == "single_neural_network")
          PLERROR("Cannot obtain embedding with single_neural_network architecture");
        output_f = Func(x, tangent_plane & embedding);
      }
      else if(output_type == "tangent_plane_variance_normalized")
        output_f = Func(x,tangent_plane & sm);
      else if(output_type == "semispherical_gaussian_parameters")
        output_f = Func(x,tangent_plane & mu & sm & sn);
      */
      output_f_all = Func(x,tangent_plane & mu & sm & sn);
    }


    if (parameters.size()>0 && parameters.nelems() == predictor->parameters.nelems())
      predictor->parameters.copyValuesFrom(parameters);
    parameters.resize(predictor->parameters.size());
    for (int i=0;i<parameters.size();i++)
      parameters[i] = predictor->parameters[i];

    Var target_index = Var(1,1);
    target_index->setName("target_index");
    Var neighbor_indexes = Var(n_neighbors,1);
    neighbor_indexes->setName("neighbor_indexes");
    p_x = Var(train_set->length(),1);
    p_x->setName("p_x");
    p_target = new VarRowsVariable(p_x,target_index);
    p_target->setName("p_target");
    p_neighbors = new VarRowsVariable(p_x,neighbor_indexes);
    p_neighbors->setName("p_neighbors");

    tangent_targets = Var(n_neighbors,n);
    if(include_current_point)
    {
      Var temp = new SourceVariable(1,n);
      temp->value.fill(0);
      tangent_targets_and_point = vconcat(temp & tangent_targets);
      p_neighbors_and_point = vconcat(p_target & p_neighbors);
    }
    else
    {
      tangent_targets_and_point = tangent_targets;
      p_neighbors_and_point = p_neighbors;
    }

    if(mu_n_neighbors < 0 ) mu_n_neighbors = n_neighbors;

    // compute - log ( sum_{neighbors of x} P(neighbor|x) ) according to semi-spherical model
    Var nll = nll_semispherical_gaussian(tangent_plane, mu, sm, sn, tangent_targets_and_point, p_target, p_neighbors_and_point, noise_var, mu_noisy,
                                         use_noise, svd_threshold, min_p_x, mu_n_neighbors); // + log_n_examples;
    //nll_f = Func(tangent_plane & mu & sm & sn & tangent_targets, nll);
    Var knn = new SourceVariable(1,1);
    knn->setName("knn");
    knn->value[0] = n_neighbors + (include_current_point ? 1 : 0);

    if(weight_mu_and_tangent != 0)
    {
      sum_nll = new ColumnSumVariable(nll) / knn + weight_mu_and_tangent * ((Var) new RowSumVariable(square(product(no_bprop(tangent_plane),mu_noisy))));
    }
    else
      sum_nll = new ColumnSumVariable(nll) / knn;

    cost_of_one_example = Func(x & tangent_targets & target_index & neighbor_indexes, predictor->parameters, sum_nll);
    noisy_data = Func(x,x + noise_var);    // Func to check what the noisy data looks like (doesn't work so far; to be investigated)
    //verify_gradient_func = Func(predictor->inputs & tangent_targets & target_index & neighbor_indexes, predictor->parameters & mu_noisy, sum_nll);

    if(n_neighbors_density > train_set.length() || n_neighbors_density < 0) n_neighbors_density = train_set.length();

    best_validation_cost = REAL_MAX;

    train_nearest_neighbors.resize(train_set.length(), n_neighbors_density-1);
    validation_nearest_neighbors.resize(valid_set.length(), n_neighbors_density);

    t_row.resize(n);
    Ut_svd.resize(n,n);
    V_svd.resize(n_dim,n_dim);
    z.resize(n);
    zm.resize(n);
    zn.resize(n);
    x_minus_neighbor.resize(n);
    neighbor_row.resize(n);
    w.resize(n_dim);

    Bs.resize(train_set.length());
    Fs.resize(train_set.length());
    mus.resize(train_set.length(), n);
    sms.resize(train_set.length());
    sns.resize(train_set.length());

    reference_set = train_set;
  }

}

void GaussianContinuum::update_reference_set_parameters()
{
  // Compute Fs, Bs, mus, sms, sns
  Bs.resize(reference_set.length());
  Fs.resize(reference_set.length());
  mus.resize(reference_set.length(), n);
  sms.resize(reference_set.length());
  sns.resize(reference_set.length());

  for(int t=0; t<reference_set.length(); t++)
  {
    Fs[t].resize(tangent_plane.length(), tangent_plane.width());
    reference_set->getRow(t,t_row);
    predictor->fprop(t_row, Fs[t].toVec() & mus(t) & sms.subVec(t,1) & sns.subVec(t,1));

    // computing B

    static Mat F_copy;
    F_copy.resize(Fs[t].length(),Fs[t].width());
    F_copy << Fs[t];
    // N.B. this is the SVD of F'
    lapackSVD(F_copy, Ut_svd, S_svd, V_svd);
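    // The loop below accumulates B = V D^{-1} U' (with F' = U D V'), so that
    // w = B z is the tangent-space projection weight vector of the
    // 'Semi-spherical covariance model' notes above.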
    Bs[t].resize(n_dim,reference_set.width());
    Bs[t].clear();
    for (int k=0;k<S_svd.length();k++)
    {
      real s_k = S_svd[k];
      if (s_k>svd_threshold) // ignore the components that have too small singular value (more robust solution)
      {
        real coef = 1/s_k;
        for (int i=0;i<n_dim;i++)
        {
          real* Bi = Bs[t][i];
          for (int j=0;j<n;j++)
            Bi[j] += V_svd(i,k)*Ut_svd(k,j)*coef;
        }
      }
    }

  }

}

void GaussianContinuum::knn(const VMat& vm, const Vec& x, const int& k, TVec<int>& neighbors, bool sortk) const
{
  int n = vm->length();
  distances.resize(n,2);
  distances.column(1) << Vec(0, n-1, 1);
  dk.setDataForKernelMatrix(vm);
  t_dist.resize(n);
  dk.evaluate_all_i_x(x, t_dist);
  distances.column(0) << t_dist;
  partialSortRows(distances, k, sortk);
  neighbors.resize(k);
  for (int i = 0; i < k; i++)
    neighbors[i] = int(distances(i,1));
}
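
// Hypothetical usage sketch (added for exposition, not original code):
//
//   TVec<int> neighbors;
//   knn(train_set, t_row, n_neighbors + 1, neighbors, true);
//
// retrieves the n_neighbors+1 training points closest to t_row (the extra
// one accounts for t_row itself when it belongs to train_set).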

void GaussianContinuum::make_random_walk()
{
  if(n_random_walk_step < 1) PLERROR("The number of random walk steps should be at least one");
  if(n_random_walk_per_point < 1) PLERROR("The number of random walks per training set point should be at least one");
  ith_step_generated_set.resize(n_random_walk_step);

  Mat generated_set(train_set.length()*n_random_walk_per_point,n);
  for(int t=0; t<train_set.length(); t++)
  {
    train_set->getRow(t,t_row);
    output_f_all(t_row);

    real this_sm = sm->value[0];
    real this_sn = sn->value[0];
    Vec this_mu(n); this_mu << mu->value;
    static Mat this_F(n_dim,n); this_F << tangent_plane->matValue;

    // N.B. this is the SVD of F'
    lapackSVD(this_F, Ut_svd, S_svd, V_svd);

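    // Each generated point below is a draw from the walk's transition
    // Gaussian: new_point = x + mu(x) + random_walk_step_prop *
    // (sqrt(sm-sn) * U z_m  [+ sqrt(sn) * z when walk_on_noise]),
    // with z_m, z standard normal and U the manifold directions from the SVD.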
    for(int rwp=0; rwp<n_random_walk_per_point; rwp++)
    {
      TVec<real> z_m(n_dim);
      TVec<real> z(n);
      for(int i=0; i<n_dim; i++)
        z_m[i] = normal_sample();
      for(int i=0; i<n; i++)
        z[i] = normal_sample();

      Vec new_point = generated_set(t*n_random_walk_per_point+rwp);
      for(int j=0; j<n; j++)
      {
        new_point[j] = 0;
        for(int k=0; k<n_dim; k++)
          new_point[j] += Ut_svd(k,j)*z_m[k];
        new_point[j] *= sqrt(this_sm-this_sn);
        if(walk_on_noise)
          new_point[j] += z[j]*sqrt(this_sn);
      }
      new_point *= random_walk_step_prop;
      new_point += this_mu + t_row;
    }
  }

  // Test of generation of random points
  /*
  int n_test_gen_points = 3;
  int n_test_gen_generated = 30;

  Mat test_gen(n_test_gen_points*n_test_gen_generated,n);
  for(int p=0; p<n_test_gen_points; p++)
  {
    for(int t=0; t<n_test_gen_generated; t++)
    {
      valid_set->getRow(p,t_row);
      output_f_all(t_row);

      real this_sm = sm->value[0];
      real this_sn = sn->value[0];
      Vec this_mu(n); this_mu << mu->value;
      static Mat this_F(n_dim,n); this_F << tangent_plane->matValue;

      // N.B. this is the SVD of F'
      lapackSVD(this_F, Ut_svd, S_svd, V_svd);

      TVec<real> z_m(n_dim);
      TVec<real> z(n);
      for(int i=0; i<n_dim; i++)
        z_m[i] = normal_sample();
      for(int i=0; i<n; i++)
        z[i] = normal_sample();

      Vec new_point = test_gen(p*n_test_gen_generated+t);
      for(int j=0; j<n; j++)
      {
        new_point[j] = 0;
        for(int k=0; k<n_dim; k++)
          new_point[j] += Ut_svd(k,j)*z_m[k];
        new_point[j] *= sqrt(this_sm-this_sn);
        if(walk_on_noise)
          new_point[j] += z[j]*sqrt(this_sn);
      }
      new_point += this_mu + t_row;
    }
  }

  PLearn::save("test_gen.psave",test_gen);
  */
  //PLearn::save("gen_points_0.psave",generated_set);
  ith_step_generated_set[0] = VMat(generated_set);

  for(int step=1; step<n_random_walk_step; step++)
  {
    Mat generated_set(ith_step_generated_set[step-1].length(),n);
    for(int t=0; t<ith_step_generated_set[step-1].length(); t++)
    {
      ith_step_generated_set[step-1]->getRow(t,t_row);
      output_f_all(t_row);

      real this_sm = sm->value[0];
      real this_sn = sn->value[0];
      Vec this_mu(n); this_mu << mu->value;
      static Mat this_F(n_dim,n); this_F << tangent_plane->matValue;

      // N.B. this is the SVD of F'
      lapackSVD(this_F, Ut_svd, S_svd, V_svd);

      TVec<real> z_m(n_dim);
      TVec<real> z(n);
      for(int i=0; i<n_dim; i++)
        z_m[i] = normal_sample();
      for(int i=0; i<n; i++)
        z[i] = normal_sample();

      Vec new_point = generated_set(t);
      for(int j=0; j<n; j++)
      {
        new_point[j] = 0;
        for(int k=0; k<n_dim; k++)
          if(S_svd[k] > svd_threshold)
            new_point[j] += Ut_svd(k,j)*z_m[k];
        new_point[j] *= sqrt(this_sm-this_sn);
        if(walk_on_noise)
          new_point[j] += z[j]*sqrt(this_sn);
      }
      new_point *= random_walk_step_prop;
      new_point += this_mu + t_row;

    }
    /*
    string path = " ";
    if(step == n_random_walk_step-1)
      path = "gen_points_last.psave";
    else
      path = "gen_points_" + tostring(step) + ".psave";

    PLearn::save(path,generated_set);
    */
    ith_step_generated_set[step] = VMat(generated_set);
  }

  reference_set = vconcat(train_set & ith_step_generated_set);

  // Single random walk
  /*
  Mat single_walk_set(100,n);
  train_set->getRow(train_set.length()-1,single_walk_set(0));
  for(int step=1; step<100; step++)
  {
    t_row << single_walk_set(step-1);
    output_f_all(t_row);

    real this_sm = sm->value[0];
    real this_sn = sn->value[0];
    Vec this_mu(n); this_mu << mu->value;
    static Mat this_F(n_dim,n); this_F << tangent_plane->matValue;

    // N.B. this is the SVD of F'
    lapackSVD(this_F, Ut_svd, S_svd, V_svd);

    TVec<real> z_m(n_dim);
    TVec<real> z(n);
    for(int i=0; i<n_dim; i++)
      z_m[i] = normal_sample();
    for(int i=0; i<n; i++)
      z[i] = normal_sample();

    Vec new_point = single_walk_set(step);
    for(int j=0; j<n; j++)
    {
      new_point[j] = 0;
      for(int k=0; k<n_dim; k++)
        if(S_svd[k] > svd_threshold)
          new_point[j] += Ut_svd(k,j)*z_m[k];
      new_point[j] *= sqrt(this_sm-this_sn);
      if(walk_on_noise)
        new_point[j] += z[j]*sqrt(this_sn);
    }
    new_point *= random_walk_step_prop;
    new_point += this_mu + t_row;
  }
  PLearn::save("image_single_rw.psave",single_walk_set);
  */
}


real GaussianContinuum::get_nll(VMat points, VMat image_points_vmat, int begin, int n_near_neigh)
{
  VMat reference_set = new SubVMatrix(points,begin,0,points.length()-begin,n);
  //Mat image(points_per_dim,points_per_dim); image.clear();
  image_nearest_neighbors.resize(image_points_vmat.length(),n_near_neigh);
  // Finding nearest neighbors

  for(int t=0; t<image_points_vmat.length(); t++)
  {
    image_points_vmat->getRow(t,t_row);
    TVec<int> nn = image_nearest_neighbors(t);
    computeNearestNeighbors(reference_set, t_row, nn);
  }

  real nll = 0;

  for(int t=0; t<image_points_vmat.length(); t++)
  {

    image_points_vmat->getRow(t,t_row);
    real this_p_x = 0;
    // fetching nearest neighbors for density estimation
    for(int neighbor=0; neighbor<n_near_neigh; neighbor++)
    {
      points->getRow(begin+image_nearest_neighbors(t,neighbor), neighbor_row);
      substract(t_row,neighbor_row,x_minus_neighbor);
      substract(x_minus_neighbor,mus(begin+image_nearest_neighbors(t,neighbor)),z);
      product(w, Bs[begin+image_nearest_neighbors(t,neighbor)], z);
      transposeProduct(zm, Fs[begin+image_nearest_neighbors(t,neighbor)], w);
      substract(z,zm,zn);
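      // Accumulate p(x|x_t): a Gaussian with the semi-spherical covariance,
      // using eq.(2) for the quadratic form and eq.(3) for log det(S)
      // (see the MATHEMATICAL DETAILS notes above).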
01132       this_p_x += exp(-0.5*(pownorm(zm,2)/sms[begin+image_nearest_neighbors(t,neighbor)] + pownorm(zn,2)/sns[begin+image_nearest_neighbors(t,neighbor)] 
01133                             + n_dim*log(sms[begin+image_nearest_neighbors(t,neighbor)]) + (n-n_dim)*log(sns[begin+image_nearest_neighbors(t,neighbor)])) - n/2.0 * Log2Pi);
01134     }
01135     
01136     this_p_x /= reference_set.length();
01137     nll -= log(this_p_x);
01138   }
01139 
01140   return nll/image_points_vmat.length();
01141 }
01142 
01143 void GaussianContinuum::get_image_matrix(VMat points, VMat image_points_vmat, int begin, string file_path, int n_near_neigh)
01144 {
01145   VMat reference_set = new SubVMatrix(points,begin,0,points.length()-begin,n);
01146   cout << "Creating image matrix: " << file_path << endl;
01147   Mat image(points_per_dim,points_per_dim); image.clear();
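  // The image points are expected to form the grid built in train():
  // point t sits at column t%points_per_dim and row t/points_per_dim of the
  // (x,y) grid, and the row index is flipped below so the saved matrix
  // reads as an image with y increasing upwards.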
01148   image_nearest_neighbors.resize(points_per_dim*points_per_dim,n_near_neigh);
01149   // Finding nearest neighbors
01150 
01151   for(int t=0; t<image_points_vmat.length(); t++)
01152   {
01153     image_points_vmat->getRow(t,t_row);
01154     TVec<int> nn = image_nearest_neighbors(t);
01155     computeNearestNeighbors(reference_set, t_row, nn);
01156   }
01157 
01158   for(int t=0; t<image_points_vmat.length(); t++)
01159   {
01160     
01161     image_points_vmat->getRow(t,t_row);
01162     real this_p_x = 0;
01163     // fetching nearest neighbors for density estimation
01164     for(int neighbor=0; neighbor<n_near_neigh; neighbor++)
01165     {
01166       points->getRow(begin+image_nearest_neighbors(t,neighbor), neighbor_row);
01167       substract(t_row,neighbor_row,x_minus_neighbor);
01168       substract(x_minus_neighbor,mus(begin+image_nearest_neighbors(t,neighbor)),z);
01169       product(w, Bs[begin+image_nearest_neighbors(t,neighbor)], z);
01170       transposeProduct(zm, Fs[begin+image_nearest_neighbors(t,neighbor)], w);
01171       substract(z,zm,zn);
01172       this_p_x += exp(-0.5*(pownorm(zm,2)/sms[begin+image_nearest_neighbors(t,neighbor)] + pownorm(zn,2)/sns[begin+image_nearest_neighbors(t,neighbor)] 
01173                             + n_dim*log(sms[begin+image_nearest_neighbors(t,neighbor)]) + (n-n_dim)*log(sns[begin+image_nearest_neighbors(t,neighbor)])) - n/2.0 * Log2Pi);
01174     }
01175     
01176     this_p_x /= reference_set.length();
01177     int y_coord = t/points_per_dim;
01178     int x_coord = t%points_per_dim;
01179     image(points_per_dim - y_coord - 1,x_coord) = this_p_x;
01180   }
01181   PLearn::save(file_path,image);
01182   
01183 }
01184 
01185 
01186 
01187 void GaussianContinuum::compute_train_and_validation_costs()
01188 {
01189   update_reference_set_parameters();
01190 
01191   // estimate p(x) for the training set
01192 
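  // N.B. train_nearest_neighbors was filled in train() with an extra index
  // argument to computeNearestNeighbors, which presumably excludes the point
  // itself, making this a leave-one-out style estimate of the training NLL.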
01193   real nll_train = 0;
01194 
01195   for(int t=0; t<train_set.length(); t++)
01196   {
01197 
01198     train_set->getRow(t,t_row);
01199     p_x->value[t] = 0;
01200     // fetching nearest neighbors for density estimation
01201     for(int neighbor=0; neighbor<train_nearest_neighbors.width(); neighbor++)
01202     {
01203       train_set->getRow(train_nearest_neighbors(t,neighbor),neighbor_row);
01204       substract(t_row,neighbor_row,x_minus_neighbor);
01205       substract(x_minus_neighbor,mus(train_nearest_neighbors(t,neighbor)),z);
01206       product(w, Bs[train_nearest_neighbors(t,neighbor)], z);
01207       transposeProduct(zm, Fs[train_nearest_neighbors(t,neighbor)], w);
01208       substract(z,zm,zn);
01209       p_x->value[t] += exp(-0.5*(pownorm(zm,2)/sms[train_nearest_neighbors(t,neighbor)] + pownorm(zn,2)/sns[train_nearest_neighbors(t,neighbor)] 
01210                          + n_dim*log(sms[train_nearest_neighbors(t,neighbor)]) + (n-n_dim)*log(sns[train_nearest_neighbors(t,neighbor)])) - n/2.0 * Log2Pi);
01211     }
01212     p_x->value[t] /= train_set.length();
01213     nll_train -= log(p_x->value[t]);
01214 
01215     if(print_parameters)
01216     {
01217       output_f_all(t_row);
01218       cout << "data point = " << x->value << " parameters = " << tangent_plane->value << " " << mu->value << " " << sm->value << " " << sn->value << " p(x) = " << p_x->value[t] << endl;
01219     }
01220   }
01221 
01222   nll_train /= train_set.length();
01223 
01224   if(verbosity > 2) cout << "NLL train = " << nll_train << endl;
01225 
01226   // estimate p(x) for the validation set
01227 
01228   real nll_validation = 0;
01229 
01230   for(int t=0; t<valid_set.length(); t++)
01231   {
01232 
01233     valid_set->getRow(t,t_row);
01234     real this_p_x = 0;
01235     // fetching nearest neighbors for density estimation
01236     for(int neighbor=0; neighbor<n_neighbors_density; neighbor++)
01237     {
01238       train_set->getRow(validation_nearest_neighbors(t,neighbor), neighbor_row);
01239       substract(t_row,neighbor_row,x_minus_neighbor);
01240       substract(x_minus_neighbor,mus(validation_nearest_neighbors(t,neighbor)),z);
01241       product(w, Bs[validation_nearest_neighbors(t,neighbor)], z);
01242       transposeProduct(zm, Fs[validation_nearest_neighbors(t,neighbor)], w);
01243       substract(z,zm,zn);
01244       this_p_x += exp(-0.5*(pownorm(zm,2)/sms[validation_nearest_neighbors(t,neighbor)] + pownorm(zn,2)/sns[validation_nearest_neighbors(t,neighbor)] 
01245                          + n_dim*log(sms[validation_nearest_neighbors(t,neighbor)]) + (n-n_dim)*log(sns[validation_nearest_neighbors(t,neighbor)])) - n/2.0 * Log2Pi);
01246     }
01247 
01248     this_p_x /= train_set.length();  // Once points are added via a random walk, this normalization will need to change (among other things...)
01249     nll_validation -= log(this_p_x);
01250   }
01251 
01252   nll_validation /= valid_set.length();
01253 
01254   if(verbosity > 2) cout << "NLL validation = " << nll_validation << endl;
01255 
01256 }
01257 
01258 // ### Nothing to add here, simply calls build_
01259 void GaussianContinuum::build()
01260 {
01261   inherited::build();
01262   build_();
01263 }
01264 
01265 extern void varDeepCopyField(Var& field, CopiesMap& copies);
01266 
01267 void GaussianContinuum::makeDeepCopyFromShallowCopy(CopiesMap& copies)
01268 {  inherited::makeDeepCopyFromShallowCopy(copies);
01269 
01270   deepCopyField(cost_of_one_example, copies);
01271   deepCopyField(reference_set,copies);
01272   varDeepCopyField(x, copies);
01273   varDeepCopyField(noise_var, copies);  
01274   varDeepCopyField(b, copies);
01275   varDeepCopyField(W, copies);
01276   varDeepCopyField(c, copies);
01277   varDeepCopyField(V, copies);
01278   varDeepCopyField(tangent_targets, copies);
01279   varDeepCopyField(muV, copies);
01280   varDeepCopyField(smV, copies);
01281   varDeepCopyField(smb, copies);
01282   varDeepCopyField(snV, copies);
01283   varDeepCopyField(snb, copies);
01284   varDeepCopyField(mu, copies);
01285   varDeepCopyField(sm, copies);
01286   varDeepCopyField(sn, copies);
01287   varDeepCopyField(mu_noisy, copies);
01288   varDeepCopyField(tangent_plane, copies);
01289   varDeepCopyField(tangent_targets_and_point, copies);
01290   varDeepCopyField(sum_nll, copies);
01291   varDeepCopyField(min_sig, copies);
01292   varDeepCopyField(min_d, copies);
01293   varDeepCopyField(embedding, copies);
01294 
01295   deepCopyField(dist, copies);
01296   deepCopyField(ith_step_generated_set, copies);
01297   deepCopyField(train_nearest_neighbors, copies);
01298   deepCopyField(validation_nearest_neighbors, copies);
01299   deepCopyField(Bs, copies);
01300   deepCopyField(Fs, copies);
01301   deepCopyField(mus, copies);
01302   deepCopyField(sms, copies);
01303   deepCopyField(sns, copies);
01304   deepCopyField(Ut_svd, copies);
01305   deepCopyField(V_svd, copies);
01306   deepCopyField(S_svd, copies);
01307   deepCopyField(dk, copies);
01308 
01309   deepCopyField(parameters, copies);
01310   deepCopyField(optimizer, copies);
01311   deepCopyField(predictor, copies);
01312   deepCopyField(output_f, copies);
01313   deepCopyField(output_f_all, copies);
01314   deepCopyField(projection_error_f, copies);
01315   deepCopyField(noisy_data, copies);
01316 }
01317 
01318 
01319 int GaussianContinuum::outputsize() const
01320 {
01321   return 1;
01322   /*
01323   if(output_type == "tangent_plane_variance_normalized")
01324     return output_f->outputsize-1;
01325   else
01326     return output_f->outputsize;
01327   */
01328 }
01329 
01330 void GaussianContinuum::forget()
01331 {
01332   if (train_set) initializeParams();
01333   stage = 0;
01334 }
01335     
01336 void GaussianContinuum::train()
01337 {
01338 
01339   // Create the grid of points for the Matlab image matrices
01340 
01341   if(save_image_mat)
01342   {
01343     if(n != 2) PLERROR("In GaussianContinuum::train(): Image matrix creation is only implemented for 2d problems");
01344     
01345     real step_x = (upper_x-lower_x)/(points_per_dim-1);
01346     real step_y = (upper_y-lower_y)/(points_per_dim-1);
01347     image_points_mat.resize(points_per_dim*points_per_dim,n);
01348     for(int i=0; i<points_per_dim; i++)
01349       for(int j=0; j<points_per_dim; j++)
01350       {
01351         image_points_mat(i*points_per_dim + j,0) = lower_x + j*step_x;
01352         image_points_mat(i*points_per_dim + j,1) = lower_y + i*step_y;
01353       }
01354 
01355     image_points_vmat = VMat(image_points_mat);
01356   }
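  // As a sanity check on the grid layout (an illustrative example, not from
  // the original code): with points_per_dim = 3 on [0,1]x[0,1], the steps
  // are 0.5 and point index 5 = 1*3 + 2 lands at (x,y) = (1.0, 0.5), i.e.
  // the grid is row-major from the lower-left corner.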
01357 
01358   // find nearest neighbors...
01359 
01360   // ... on the training set
01361   
01362   for(int t=0; t<train_set.length(); t++)
01363   {
01364     train_set->getRow(t,t_row);
01365     TVec<int> nn = train_nearest_neighbors(t);
01366     computeNearestNeighbors(train_set, t_row, nn, t);
01367   }
01368   
01369   // ... on the validation set
01370   
01371   for(int t=0; t<valid_set.length(); t++)
01372   {
01373     valid_set->getRow(t,t_row);
01374     TVec<int> nn = validation_nearest_neighbors(t);
01375     computeNearestNeighbors(train_set, t_row, nn);
01376   }
01377 
01378   VMat train_set_with_targets;
01379   VMat targets_vmat;
01380   if (!cost_of_one_example)
01381     PLERROR("GaussianContinuum::train: build has not been run after setTrainingSet!");
01382 
01383   targets_vmat = local_neighbors_differences(train_set, n_neighbors, false, true);
01384 
01385   train_set_with_targets = hconcat(train_set, targets_vmat);
01386   train_set_with_targets->defineSizes(inputsize()+inputsize()*n_neighbors+1+n_neighbors,0);
01387   int l = train_set->length();  
01388   //log_n_examples->value[0] = log(real(l));
01389   int nsamples = batch_size>0 ? batch_size : l;
01390 
01391   Var totalcost = meanOf(train_set_with_targets, cost_of_one_example, nsamples);
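  // meanOf averages cost_of_one_example over nsamples consecutive examples,
  // so each optimizer stage below sees one (mini)batch: batch_size examples
  // when batch_size > 0, otherwise the whole training set.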
01392 
01393   if(optimizer)
01394     {
01395       optimizer->setToOptimize(parameters, totalcost);  
01396       optimizer->build();
01397     }
01398   else PLERROR("GaussianContinuum::train can't train without setting an optimizer first!");
01399   
01400   // number of optimizer stages corresponding to one learner stage (one epoch)
01401   int optstage_per_lstage = l/nsamples;
01402 
01403   PP<ProgressBar> pb;
01404   if(report_progress>0)
01405     pb = new ProgressBar("Training GaussianContinuum from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
01406 
01407   t_row.resize(train_set.width());
01408 
01409   int initial_stage = stage;
01410   bool early_stop=false;
01411   while(stage<nstages && !early_stop)
01412     {
01413       optimizer->nstages = optstage_per_lstage;
01414       train_stats->forget();
01415       optimizer->early_stop = false;
01416       optimizer->optimizeN(*train_stats);
01417       train_stats->finalize();
01418       if(verbosity>2)
01419         cout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
01420       ++stage;
01421       if(pb)
01422         pb->update(stage-initial_stage);
01423       
01424       if(stage != 0 && stage%compute_cost_every_n_epochs == 0)
01425       {
01426         compute_train_and_validation_costs();
01427       }
01428     }
01429   if(verbosity>1)
01430     cout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;
01431 
01432   update_reference_set_parameters();
01433 
01434   cout << "best train: " << get_nll(train_set,train_set,0,n_neighbors_density) << endl;
01435   cout << "best validation: " << get_nll(train_set,valid_set,0,n_neighbors_density) << endl;
01436 
01437   // sanity check: recompute the training-set NLL via computeCostsOnly
01438 
01439   real nll_train = 0;
01440   Vec costs(1);
01441   Vec target;
01442   for(int i=0; i<train_set.length(); i++)
01443   {
01444     train_set->getRow(i,t_row);
01445     computeCostsOnly(t_row,target,costs);
01446     nll_train += costs[0];
01447   }
01448   nll_train /= train_set.length();
01449   cout << "nll_train: " << nll_train << endl;
01450   
01451   /*
01452   int n_test_gen_points = 3;
01453   int n_test_gen_generated = 30;
01454   Mat noisy_data_set(n_test_gen_points*n_test_gen_generated,n);
01455   
01456   for(int k=0; k<n_test_gen_points; k++)
01457   {
01458     for(int t=0; t<n_test_gen_generated; t++)
01459     {
01460       valid_set->getRow(k,t_row);
01461       Vec noisy_point = noisy_data_set(k*n_test_gen_generated+t);
01462       noisy_point << noisy_data(t_row);
01463     }
01464     PLearn::save("noisy_data.psave",noisy_data_set);
01465   }
01466   */
01467   
01468   if(n==2 && save_image_mat)
01469   {
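    // For 2-d inputs the noise direction is the tangent vector rotated by
    // 90 degrees, (t_1, -t_0); below, the noise and manifold directions are
    // rescaled to lengths sqrt(sn) and sqrt(sm) respectively for plotting.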
01470     Mat test_set(valid_set.length(),valid_set.width());
01471     Mat m_dir(valid_set.length(),n);
01472     Mat n_dir(valid_set.length(),n);
01473     for(int t=0; t<valid_set.length(); t++)
01474     {
01475       valid_set->getRow(t,t_row);
01476       test_set(t) << t_row;
01477       output_f_all(t_row);
01478       Vec noise_direction = n_dir(t);
01479       noise_direction[0] = tangent_plane->value[1];
01480       noise_direction[1] = -1*tangent_plane->value[0];
01481       Vec manifold_direction = m_dir(t);
01482       manifold_direction << tangent_plane->value;
01483       noise_direction *= sqrt(sn->value[0])/norm(noise_direction,2);
01484       manifold_direction *= sqrt(sm->value[0])/norm(manifold_direction,2);
01485     }
01486     PLearn::save("test_set.psave",test_set);
01487     PLearn::save("m_dir.psave",m_dir);
01488     PLearn::save("n_dir.psave",n_dir);
01489   }
01490   
01491 
01492   if(n_random_walk_step > 0)
01493   {
01494     make_random_walk();
01495     update_reference_set_parameters();
01496   }
01497   
01498   if(save_image_mat)
01499   {
01500     cout << "Creating image matrix" << endl;
01501     get_image_matrix(train_set, image_points_vmat, 0,"image.psave", n_neighbors_density);
01502 
01503     image_prob_mat.resize(points_per_dim,points_per_dim);
01504     Mat image_points(points_per_dim*points_per_dim,2);
01505     Mat image_mu_vectors(points_per_dim*points_per_dim,2);
01506     //Mat image_sigma_vectors(points_per_dim*points_per_dim,2);
01507     for(int t=0; t<image_points_vmat.length(); t++)
01508     {
01509       image_points_vmat->getRow(t,t_row);
01510      
01511       output_f_all(t_row);
01512 
01513       image_points(t,0) = t_row[0];
01514       image_points(t,1) = t_row[1];
01515       
01516       image_mu_vectors(t) << mu->value;
01517     }
01518     PLearn::save("image_points.psave",image_points);
01519     PLearn::save("image_mu_vectors.psave",image_mu_vectors);
01520 
01521     if(n_random_walk_step > 0)
01522     {
01523       string path = "image_rw_" + tostring(0) + ".psave";
01524 
01525       get_image_matrix(reference_set, image_points_vmat, 0, path, n_neighbors_density*n_random_walk_per_point);
01526       
01527       for(int i=0; i<n_random_walk_step; i++)
01528       {
01529         if(i == n_random_walk_step - 1)
01530           path = "image_rw_last.psave";
01531         else
01532           path = "image_rw_" + tostring(i+1) + ".psave";
01533 
01534         get_image_matrix(reference_set, image_points_vmat, i*train_set.length()*n_random_walk_per_point+train_set.length(),path,n_neighbors_density*n_random_walk_per_point);
01535       }
01536 
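      // N.B. this 'begin' skips the train set (first l rows of
      // reference_set) plus every walk step except the last, so the NLLs
      // below are computed with only the points generated at the final step.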
01537       cout << "NLL random walk on train: " << get_nll(reference_set,train_set,(n_random_walk_step-1)*train_set.length()*n_random_walk_per_point+train_set.length(),n_neighbors_density*n_random_walk_per_point) << endl;
01538       cout << "NLL random walk on validation: " << get_nll(reference_set,valid_set,(n_random_walk_step-1)*train_set.length()*n_random_walk_per_point+train_set.length(),n_neighbors_density*n_random_walk_per_point) << endl;
01539     }
01540   }
01541 
01542 }
01543 
01544 void GaussianContinuum::initializeParams()
01545 {
01546   if (seed_>=0)
01547     manual_seed(seed_);
01548   else
01549     PLearn::seed();
01550 
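  // Weights are drawn uniformly in [-delta, delta] with a fan-in based
  // scale: delta = 1/sqrt(inputsize()) for the input weights V, then
  // delta = 1/n_hidden_units for the hidden-to-output weights.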
01551   if (architecture_type=="embedding_neural_network")
01552   {
01553     real delta = 1.0 / sqrt(real(inputsize()));
01554     fill_random_uniform(V->value, -delta, delta);
01555     delta = 1.0 / real(n_hidden_units);
01556     fill_random_uniform(W->matValue, -delta, delta);
01557     c->value.clear();
01558     fill_random_uniform(smV->matValue, -delta, delta);
01559     smb->value.clear();
01561     snb->value.clear();
01562     fill_random_uniform(snV->matValue, -delta, delta);
01563     fill_random_uniform(muV->matValue, -delta, delta);
01564   }
01565   else if (architecture_type=="single_neural_network")
01566   {
01567     real delta = 1.0 / sqrt(real(inputsize()));
01568     fill_random_uniform(V->value, -delta, delta);
01569     delta = 1.0 / real(n_hidden_units);
01570     fill_random_uniform(W->matValue, -delta, delta);
01571     c->value.clear();
01572     fill_random_uniform(smV->matValue, -delta, delta);
01573     smb->value.clear();
01575     snb->value.clear();
01576     fill_random_uniform(snV->matValue, -delta, delta);
01577     fill_random_uniform(muV->matValue, -delta, delta);
01578     b->value.clear();
01579   }
01580   else PLERROR("other types not handled yet!");
01581   
01582   for(int i=0; i<p_x.length(); i++)
01583     p_x->value[i] = 1.0/p_x.length();
01584 
01585   if(optimizer)
01586     optimizer->reset();
01587 }
01588 
01589 
01590 void GaussianContinuum::computeOutput(const Vec& input, Vec& output) const
01591 {
01592   // compute density
01593   real ret = 0;
01594 
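  // Same mixture-of-local-Gaussians estimate as in get_nll(), with the
  // neighbors found on the fly via knn() against the reference set.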
01595   // fetching nearest neighbors for density estimation
01596   knn(reference_set,input,n_neighbors_density,t_nn,false);
01597   t_row << input;
01598   for(int neighbor=0; neighbor<t_nn.length(); neighbor++)
01599   {
01600     reference_set->getRow(t_nn[neighbor],neighbor_row);
01601     substract(t_row,neighbor_row,x_minus_neighbor);
01602     substract(x_minus_neighbor,mus(t_nn[neighbor]),z);
01603     product(w, Bs[t_nn[neighbor]], z);
01604     transposeProduct(zm, Fs[t_nn[neighbor]], w);
01605     substract(z,zm,zn);
01606     ret += exp(-0.5*(pownorm(zm,2)/sms[t_nn[neighbor]] + pownorm(zn,2)/sns[t_nn[neighbor]] 
01607                                + n_dim*log(sms[t_nn[neighbor]]) + (n-n_dim)*log(sns[t_nn[neighbor]])) - n/2.0 * Log2Pi);
01608   }
01609   ret /= reference_set.length();
01610   output[0] = ret;
01611   /*
01612   if(output_type == "tangent_plane_variance_normalized")
01613   {
01614     int nout = outputsize()+1;
01615     Vec temp_output(nout);
01616     temp_output << output_f(input);
01617     Mat F = temp_output.subVec(0,temp_output.length()-1).toMat(n_dim,n);
01618     if(n_dim*n != temp_output.length()-1) PLERROR("GaussianContinuum::computeOutput: inconsistent tangent plane output size");
01619     for(int i=0; i<F.length(); i++)
01620     {
01621       real norm = pownorm(F(i),1);
01622       F(i) *= sqrt(temp_output[temp_output.length()-1])/norm;
01623     }
01624     
01625     output.resize(temp_output.length()-1);
01626     output << temp_output.subVec(0,temp_output.length()-1);
01627   }
01628   else
01629   {
01630     int nout = outputsize();
01631     output.resize(nout);
01632     output << output_f(input);
01633   }
01634   */
01635 }    
01636 
01637 void GaussianContinuum::computeCostsFromOutputs(const Vec& input, const Vec& output, 
01638                                              const Vec& target, Vec& costs) const
01639 {
01640   costs[0] = -log(output[0]);
01641 }                                
01642 
01643 TVec<string> GaussianContinuum::getTestCostNames() const
01644 {
01645   return getTrainCostNames();
01646 }
01647 
01648 TVec<string> GaussianContinuum::getTrainCostNames() const
01649 {
01650   TVec<string> cost(1); cost[0] = "NLL";
01651   return cost;
01652 }
01653 
01654 
01655 
01656 } // end of namespace PLearn
01657 
01658 
01659 /*
01660   Local Variables:
01661   mode:c++
01662   c-basic-offset:4
01663   c-file-style:"stroustrup"
01664   c-file-offsets:((innamespace . 0)(inline-open . 0))
01665   indent-tabs-mode:nil
01666   fill-column:79
01667   End:
01668 */
01669 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :