// -*- C++ -*-

// GaussianContinuumDistribution.cc
//
// Copyright (C) 2004 Yoshua Bengio & Hugo Larochelle 
// 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************      
   * $Id: GaussianContinuumDistribution.cc 9418 2008-09-02 15:33:46Z nouiz $
   ******************************************************* */

// Authors: Yoshua Bengio & Martin Monperrus

#include "GaussianContinuumDistribution.h"
#include <plearn/vmat/LocalNeighborsDifferencesVMatrix.h>
#include <plearn/var/ProductVariable.h>
#include <plearn/var/PlusVariable.h>
#include <plearn/var/SoftplusVariable.h>
#include <plearn/var/VarRowsVariable.h>
#include <plearn/var/VarRowVariable.h>
#include <plearn/var/SourceVariable.h>
#include <plearn/var/Var_operators.h>
#include <plearn/vmat/ConcatColumnsVMatrix.h>
#include <plearn/math/random.h>
#include <plearn/var/SumOfVariable.h>
#include <plearn/var/TanhVariable.h>
#include <plearn/var/NllSemisphericalGaussianVariable.h>
#include <plearn/var/DiagonalizedFactorsProductVariable.h>
#include <plearn/math/plapack.h>
#include <plearn/var/ColumnSumVariable.h>
#include <plearn/vmat/VMat_basic_stats.h>
#include <plearn/vmat/ConcatRowsVMatrix.h>
#include <plearn/vmat/SubVMatrix.h>
#include <plearn/var/PDistributionVariable.h>
#include <plearn_learners/distributions/UniformDistribution.h>
#include <plearn_learners/distributions/GaussianDistribution.h>
#include <plearn/display/DisplayUtils.h>
#include <plearn/opt/GradientOptimizer.h>
#include <plearn/var/TransposeVariable.h>
#include <plearn/var/Var_utils.h>
#include <plearn/var/ConcatRowsVariable.h>
#include <plearn/var/RowSumVariable.h>
#include <plearn/var/ReshapeVariable.h>
#include <plearn/var/SquareVariable.h>
#include <plearn/var/ExpVariable.h>
#include <plearn/var/NoBpropVariable.h>
#include <plearn/var/ThresholdBpropVariable.h>
#include <plearn/io/load_and_save.h>
#include <plearn/vmat/VMat_computeNearestNeighbors.h>
#include <plearn/vmat/FractionSplitter.h>
#include <plearn/vmat/RepeatSplitter.h>

namespace PLearn {
using namespace std;

GaussianContinuumDistribution::GaussianContinuumDistribution() 
/* ### Initialize all fields to their default value here */
  : weight_mu_and_tangent(0), include_current_point(false),
    random_walk_step_prop(1), use_noise(false), use_noise_direction(false),
    noise(-1), noise_type("uniform"), n_random_walk_step(0),
    n_random_walk_per_point(0), walk_on_noise(true), min_sigma(0.00001),
    min_diff(0.01), fixed_min_sigma(0.00001), fixed_min_diff(0.01),
    min_p_x(0.001), sm_bigger_than_sn(true), n_neighbors(5),
    n_neighbors_density(-1), mu_n_neighbors(2), n_dim(1),
    sigma_grad_scale_factor(1), update_parameters_every_n_epochs(5),
    variances_transfer_function("softplus"),
    architecture_type("single_neural_network"),
    n_hidden_units(-1), batch_size(1), norm_penalization(0), svd_threshold(1e-5)
{
}

PLEARN_IMPLEMENT_OBJECT(GaussianContinuumDistribution, "Learns a continuous (uncountable) Gaussian mixture with non-local parametrization",
                        "This learner implicitly estimates the density of the data through\n"
                        "a generalization of the Gaussian mixture model and of the TangentLearner\n"
                        "algorithm (see help on that class). The density is the fixed point of\n"
                        "a random walk {z_t} that follows the following transition probabilities:\n"
                        "   z_{t+1} sampled from a Gaussian associated with z_t, centered\n"
                        "   at z_t + mu(z_t), with covariance matrix S(z_t).\n"
                        "The semantics of that random walk are the following (and that is how\n"
                        "it will be estimated). Given a point z_t, the sample z_{t+1} represents\n"
                        "a 'near neighbor' of z_t. We assume that the density is smooth enough\n"
                        "that the cloud of 'near neighbors' around z_t can be modeled by a Gaussian.\n"
                        "The functions mu(.) and S(.) have globally estimated parameters (for example\n"
                        "using neural nets or linear functions of x, or linear functions of a basis).\n"
                        "Here we suppose that the eigenvalues of S(.) come from two groups:\n"
                        "the first group should correspond to locally estimated principal\n"
                        "directions of variations and there are no constraints on these eigenvalues\n"
                        "(except that they are positive), while the second group should correspond\n"
                        "to 'noise' directions, that have all the same value sigma2_noise\n"
                        "i.e. it is not necessary to explicitly model the directions of variations\n"
                        "(the eigenvectors) associated with the second group. In general we expect\n"
                        "sigma2_noise to be small compared to the first group eigenvalues, which\n"
                        "means that the Gaussians are flat in the corresponding directions, and\n"
                        "that the first group variations correspond to modeling a manifold near\n"
                        "which most of the data lie. Optionally, an embedding corresponding\n"
                        "to variations associated with the first group of eigenvalues can be learnt\n"
                        "by choosing for the architecture_type option a value of the form embedding_*.\n"
                        "Although the density is not available in closed form, it is easy (but maybe slow)\n"
                        "to sample from it: pick one of the training examples at random and then\n"
                        "follow the random walk (ideally, a long time). It is also possible in\n"
                        "principle to obtain a numerical estimate of the density at a point x,\n"
                        "by sampling enough random walk points around x.\n"
                        );

/* MATHEMATICAL DETAILS

* Fixed point of the random walk is the density:

  Let p(Z_t) represent the density of the t-th random walk sample Z_t (a r.v.).
  To obtain p(Z_{t+1}) we sample Z_t from p(Z_t) and then sample Z_{t+1}
  from p(Z_{t+1}|Z_t), using the Gaussian with mean z_t + mu(z_t) and 
  covariance matrix S(z_t). Thus p(Z_{t+1}=x) = \int_y p(Z_t=y) p(Z_{t+1}=x|Z_t=y) dy.
  Then at the fixed point we should have p(Z_t) = p(Z_{t+1}) = p(X), i.e.
    p(x) = \int_y p(y) p(x|y) dy
  which has the same form as a Gaussian mixture, with p(x|y) Gaussian in x,
  and the sum replaced by an integral (i.e. there is an uncountable 'number'
  of Gaussian components, one at each position y in space). It is possible
  to achieve this only because each Gaussian component p(x|y) has mean and variance that
  depend on y and on global parameters theta, and those parameters are estimated
  from data everywhere, and might generalize to new places.

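  As a minimal sketch (not PLearn code; mu_fn and chol_S_fn are hypothetical
  stand-ins for the learned mean function mu(.) and for a factor L(.) such
  that L L' = S(.)), one transition of this walk is simply:

    Vec random_walk_step(const Vec& z_t)
    {
      Mat L = chol_S_fn(z_t);        // L L' = S(z_t)
      Vec eps(z_t.length());
      for (int i = 0; i < eps.length(); i++)
        eps[i] = normal_sample();    // eps ~ N(0, I)
      Vec next(z_t.length());
      product(next, L, eps);         // next = L eps
      next += z_t;
      next += mu_fn(z_t);            // z_{t+1} = z_t + mu(z_t) + L eps
      return next;
    }
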
* How to estimate the density numerically:

  Although the density cannot be computed exactly, it can be estimated
  using a Gaussian mixture with a finite number of components. Suppose that
  we have sampled a set R of random samples on the above random walks
  (including also the training data, which we know to come from the
  true density). Then we obtain a Monte-Carlo approximation of
  the above equation as follows:
    p(x) ~=~ average_t p(x|x_t)

  where x_t is in R (i.e. sampled from the distribution p(y)).  This is
  simply a uniformly weighted Gaussian mixture centered on the data points
  and on the random walk points. If we want to get a more precise estimator
  of p(x), we should sample points more often around x, but then correct
  this bias, using importance sampling. A simple way to do this is to
  choose the points of R from which new walks are started in such a way
  as to give more preference to the near neighbors of x in R. Let q_x(x_t)
  be a discrete distribution over the elements of R which is non-zero
  everywhere but puts more weight on the neighbors of x. Then we create
  new samples, to be put in a set R', by performing random walks starting
  from points of R with probability q_x(x_t). The resulting estimator
  would be
    p(x) ~=~ average_{x_t in R'} p(x|x_t) / (q_x(x_t) |R'|).

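  For instance, the uniformly weighted estimator above can be sketched as
  follows (gauss_log_density is a hypothetical helper returning log p(x|x_t)
  for the Gaussian centered at x_t + mu(x_t) with covariance S(x_t), computed
  as in eq. (2) and (3) below):

    real estimate_density(const Vec& x, const VMat& R)
    {
      real p = 0;
      Vec x_t(x.length());
      for (int t = 0; t < R.length(); t++)
      {
        R->getRow(t, x_t);
        p += exp(gauss_log_density(x, x_t)); // p(x|x_t)
      }
      return p / R.length();                 // average_t p(x|x_t)
    }
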
* How to estimate mu(x) and S(x)?

  We propose to estimate mu(x) and S(x) by minimizing the negative
  log-likelihood of the neighbors x_j of each training point x_i,
  according to the Gaussian with mean x_i + mu(x_i) and covariance
  matrix S(x_i), plus possibly some regularization term, such
  as weight decay on the parameters of the functions. In this 
  implementation training proceeds by stochastic gradient, visiting
  each example x_i (with all of its neighbors) and then making
  a parameter update.

* Parametrization of mu(x) and S(x):

  mu(x) is simply the output of a linear or neural-net function of x.
  S(x) is more difficult to parametrize. We consider two main solutions
  here: (1) semi-spherical (only two variances are considered: on the
  manifold and orthogonal to it), or (2) factor model with Cholesky
  decomposition for the manifold directions and a single shared variance
  for the directions orthogonal to the manifold. Note that we
  would prefer to parametrize S(x) in such a way as to make it
  easy to compute v'S(x)^{-1}v for any vector v, and log(det(S(x))).

  Consider the derivative of NLL == -log(p(y)) wrt log(p(y|x)):
    d(-log(p(y)))/d(log(p(y|x))) = -p(y|x)p(x)/p(y) = -p(x|y).
  (this also corresponds to the 'posterior' factor in EM).

  The conditional log-likelihood  log(p(y|x)) for a neighbor y
  of an example x is written
    log(p(y|x)) = -0.5(y-x-mu(x))'S(x)^{-1}(y-x-mu(x)) - 0.5*log(det(S(x))) - (n/2)log(2pi).

  Hence dNLL/dtheta is obtained from
    0.5 p(x|y) (d((y-x-mu(x))'S(x)^{-1}(y-x-mu(x)))/dtheta + d(log(det(S(x))))/dtheta)       (1)
  which gives significant weight only to the near neighbors y of x.

  The gradient wrt mu(x) is in particular
    dNLL/dmu(x) = p(x|y) S(x)^{-1} (mu(x)+x-y).

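  In code, the gradient on mu is a one-liner (a sketch only; Sinv_times and
  posterior_weight are hypothetical helpers computing S(x)^{-1} v and p(x|y),
  and mu_x stands for mu(x)):

    // dNLL/dmu(x) = p(x|y) S(x)^{-1} (mu(x) + x - y)
    Vec g = Sinv_times(x, mu_x + x - y);
    g *= posterior_weight(x, y);
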
* Semi-spherical covariance model:

  The idea of the semi-spherical model is that we assume that the neighbors difference
  vector y-x has two components: (a) one along the tangent plane of the manifold, spanned
  by a set of vectors F_i(x), the rows of F(x), a matrix-valued unconstrained estimated function,
  and (b) one orthogonal to that tangent plane. We write z = y-x-mu(x) = z_m + z_n, with z_m the
  component on the manifold and z_n the noise component. Since we want z_n orthogonal
  to the tangent plane, we choose it such that F z_n = 0. Since z_m is in the span
  of the rows F_i of F, we can write it as a linear combination of these rows, with
  weights w_i. Let w=(w_1,...w_d), then z_m = F'w. To find w, it is enough to find
  the projection of y-x along the tangent plane, which corresponds to the shortest
  possible z_n. Minimizing the norm of z_n, equal to ||z-F'w||^2, yields the first order equation
      F(z-F'w) = 0
  i.e. the solution is
      w = (FF')^{-1} Fz.
  In practice, this will be done by using a singular value decomposition of F',
      F' = U D V'
  so w = V D^{-2} V' F z = V D^{-2} V' V D U' z = V D^{-1} U' z. Note that
  z_m' z_n = w'F (z - F'w) = 0, hence z_m is orthogonal to z_n.

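  In the code below, this projection is implemented by precomputing, for each
  reference point, the matrix B = V D^{-1} U' (see
  update_reference_set_parameters). Splitting a difference vector z into its
  manifold and noise parts then costs only two matrix-vector products
  (a sketch using the same PLearn calls and member variables as this file):

    product(w, B, z);             // w   = B z = (FF')^{-1} F z
    transposeProduct(zm, F, w);   // z_m = F' w
    substract(z, zm, zn);         // z_n = z - z_m
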
  By our model, the covariance matrix can be decomposed in two parts,
    S(x) = sigma2_manifold U U'  + sigma2_noise N N'
  where M=[U | N] is the matrix whose columns are eigenvectors of S(x), with U the e-vectors
  along the manifold directions and N the e-vectors along the noise directions.
  It is easy to show that one does not need to explicitly represent the
  noise eigenvectors N, because both the columns of U and the columns of N
  are also eigenvectors of the identity matrix. Hence
   S(x) = (sigma2_manifold - sigma2_noise) U U' + sigma2_noise I.
  with I the nxn identity matrix.
  This can be shown by re-writing I = [U | N] [U | N]' and appropriate algebra.

  It is also easy to show that S(x)^{-1} z = (1/sigma2_manifold) z_m + (1/sigma2_noise) z_n,
  that the quadratic form is 
     z' S(x)^{-1} z = (1/sigma2_manifold) ||z_m||^2 + (1/sigma2_noise) ||z_n||^2,          (2)
  and that 
     log(det(S(x))) = d log(sigma2_manifold) + (n-d) log(sigma2_noise).                    (3)

  How to show the above:
    @ We have defined S(x) = M diag(s) M' where s is a vector whose first d elements are sigma2_manifold
    and last n-d elements are sigma2_noise, and M=[U | N] are the eigenvectors, or
      S(x) = sum_{i=1}^d sigma2_manifold U_i U_i' + sum_{i=d+1}^n sigma2_noise N_i N_i'
    where U_i is a column of U and N_i a column of N. Hence
      S(x) = sigma2_manifold sum_{i=1}^d U_i U_i' - sigma2_noise sum_{i=1}^d U_i U_i'
             + sigma2_noise (sum_{i=1}^d U_i U_i' + sum_{i=d+1}^n  N_i N_i')
           = (sigma2_manifold - sigma2_noise) sum_{i=1}^d U_i U_i' + sigma2_noise I 
           = (sigma2_manifold - sigma2_noise) U U' + sigma2_noise I 
    since sum_{i=1}^n M_i M_i' = M M' = I (since M is orthonormal).

    @ S(x)^{-1} = M diag(s)^{-1} M' = (1/sigma2_manifold - 1/sigma2_noise) U U' + 1/sigma2_noise I 
    using the same argument as above but replacing all sigma2* by 1/sigma2*.

    @ Hence S(x)^{-1} z = S(x)^{-1} (z_m + z_n) 
                      = (1/sigma2_manifold - 1/sigma2_noise) z_m + 1/sigma2_noise (z_m + z_n)
                      = 1/sigma2_manifold z_m + 1/sigma2_noise z_n
    where on the second line we used the fact that U U' acts as the identity
    matrix for vectors spanned by the columns of U, which can be shown as follows.
    Let z_m = sum_i a_i U_i. Then U U' z_m = sum_i a_i U U' U_i = sum_i a_i U e_i = sum_i a_i U_i = z_m.

    @ Also, z' S(x)^{-1} z = (z_m + z_n) (1/sigma2_manifold z_m + 1/sigma2_noise z_n)
                         = 1/sigma2_manifold ||z_m||^2 + 1/sigma2_noise ||z_n||^2
    since by construction z_m . z_n = 0.

    @ Finally, log(det(S(x))) = sum_{i=1}^n log(s_i) 
                              = sum_{i=1}^d log(sigma2_manifold) + sum_{i=d+1}^n log(sigma2_noise)
                              = d log(sigma2_manifold) + (n-d) log(sigma2_noise).

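  These identities are what make the model cheap to evaluate: given the split
  z = z_m + z_n computed above, eq. (2) and (3) give directly (a sketch with
  sm = sigma2_manifold and sn = sigma2_noise, matching the expression used in
  the density code of this file):

    real log_p_y_given_x = -0.5*( pownorm(zm,2)/sm + pownorm(zn,2)/sn
                                  + n_dim*log(sm) + (n-n_dim)*log(sn) )
                           - n/2.0 * Log2Pi;
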
* Gradients on covariance for the semi-spherical model:

  We have already shown the gradient of NLL on mu(x) above. We need
  also here the gradient on sigma2_manifold, sigma2_noise, and F, all
  three of which are supposed to be functions of x (e.g. outputs of
  a neural network, so we need to provide the gradient on the output
  units of the neural network). Note that the sigma2's must be constrained
  to be positive (e.g. by squaring the output, using an exponential
  or softplus activation function).

    dNLL/dsigma2_manifold = 0.5 p(x|y) ( d/sigma2_manifold - ||z_m||^2/sigma2_manifold^2)

  N.B. this is the gradient on the variance, not on the standard deviation.

  Proof: Recall eq.(1) and let theta = sigma2_manifold. Using eq.(2) we obtain
  for the first term in (1): 
    d/dsigma2_manifold (0.5/sigma2_manifold ||z_m||^2) = -0.5||z_m||^2/sigma2_manifold^2.
  Using (3) we obtain the second term 
    d/dsigma2_manifold (0.5 d log(sigma2_manifold)) = 0.5 d/sigma2_manifold.

  The same arguments yield the following for the gradient on sigma2_noise:

    dNLL/dsigma2_noise = 0.5 p(x|y) ( (n-d)/sigma2_noise - ||z_n||^2/sigma2_noise^2)


  Now let us consider the more difficult case of theta = F_{ij} (i in {1..d}, j in {1..n}).
  The result is fortunately simple to write:

    dNLL/dF = p(x|y) (1/sigma2_manifold - 1/sigma2_noise) w z_n'

  Proof: First we see that the second term in eq.(1) does not depend on F because of eq.(3).
  For the first term of eq.(1), we obtain using (2)
    d(0.5 z'S(x)^{-1} z)/dF_{ij} 
      = d/dF_{ij} ((0.5/sigma2_manifold) ||z_m||^2 + (0.5/sigma2_noise) ||z_n||^2)
      = d/dF_{ij} ((0.5/sigma2_manifold) ||F'w||^2 + (0.5/sigma2_noise) ||z-F'w||^2)
      = (1/sigma2_manifold) (F'w)' d(F'w)/dF_{ij} + (1/sigma2_noise) z_n' d(z-F'w)/dF_{ij} 
      = (1/sigma2_manifold) (F'w)' d(F'w)/dF_{ij} - (1/sigma2_noise) z_n' d(F'w)/dF_{ij} (4)
  Note that w depends on F so we will have to compute two components:
    d(F'w)/dF_{ij} = w_i e_j + F' dw/dF_{ij}                                        (5)
  Now recall how w depends on F: w = (FF')^{-1} F z, and recall the identity 
  d(A^{-1})/dx = -A^{-1} dA/dx A^{-1} for square matrix A. Hence
    dw/dF_{ij} = - (FF')^{-1} d(FF')/dF_{ij} (FF')^{-1} F z + (FF')^{-1} dF/dF_{ij} z
               = - (FF')^{-1} ( F e_j e_i' + e_i e_j' F') w + (FF')^{-1} e_i e_j' z
  where we have replaced (FF')^{-1}Fz by w in the last factor of the first term, and
  where e_i is the d-vector with all 0's except a 1 at position i, and e_j is the n-vector
  with all 0's except a 1 at position j. It is easy to see that dF/dF_{ij} = e_i e_j'
  which is the matrix with all 0's except at position (i,j). Then 
    d(FF')/dF_{ij} = F (dF/dF_{ij})' + dF/dF_{ij} F' = F e_j e_i' + e_i e_j' F'.
  
  We are now ready to pop back and plug all these results together. First we plug
  the above in (5):
   d(F'w)/dF_{ij} = w_i e_j + F' (FF')^{-1} e_i e_j' z - F' (FF')^{-1} ( F e_j e_i' + e_i e_j' F') w
  then plug this back in (4) noting that FF' cancels with (FF')^{-1} everywhere in the sigma2_manifold term:
   d(0.5 z'S(x)^{-1} z)/dF_{ij} =
     (1/sigma2_manifold)  (w'F w_i e_j + w'e_i e_j' z - w'(F e_j e_i' + e_i e_j' F') w)
     - (1/sigma2_noise) w_i z_n'e_j
   using z_n'F' = 0, so that all the terms involving F' dw/dF_{ij} vanish in the sigma2_noise part.
   Note: F'(FF')^{-1}F = UDV'(VD^{-2}V')VDU' = UU', and UU'z = UU'(z_m+z_n) = z_m to simplify the last term.
   In the sigma2_manifold term let us use the facts that (a) each sub-term is a scalar, (b) tr(AB)=tr(BA),
   (c) scalar = scalar', and (d) e_i'A e_j = A_{ij} to write everything in matrix form:
       (1/sigma2_manifold)  (w'F e_j w_i  + w'e_i e_j' z - w'(F e_j e_i' + e_i e_j' F') w)
     = (1/sigma2_manifold)  (w'F e_j e_i' w + z'e_j e_i' w - w'F e_j e_i'w - z'UU'e_j e_i'w)
     = (1/sigma2_manifold)  (e_i'ww'F e_j + e_i'wz'e_j - e_i'ww'Fe_j - e_i'w z_m' e_j)
     = (1/sigma2_manifold)  (ww'F  + wz' - ww'F - w z_m')_{ij}
     = (1/sigma2_manifold)  (wz' - w z_m')_{ij}
     = (1/sigma2_manifold)  (w z_n')_{ij}
   Now let us do the sigma2_noise term:
       (1/sigma2_noise) w_i z_n'e_j = (1/sigma2_noise) e_i' w z_n'e_j = (1/sigma2_noise) (w z_n')_{ij}
   Putting the sigma2_manifold term and the sigma2_noise term together we obtain in matrix form
    d(0.5 z'S(x)^{-1} z)/dF = (1/sigma2_manifold) w z_n' - (1/sigma2_noise) w z_n'
   i.e. the final result
    d(0.5 z'S(x)^{-1} z)/dF = (1/sigma2_manifold - 1/sigma2_noise) w z_n'
   which gives (using dlog(det(S(x)))/dF = 0) the claimed statement:
    dNLL/dF = p(x|y) (1/sigma2_manifold - 1/sigma2_noise) w z_n'

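  Putting the three covariance gradients together in code form (a sketch;
  px_y stands for p(x|y), e.g. from the hypothetical posterior_weight helper
  above, and sm, sn, w, zm, zn are as in the derivation, with d = n_dim):

    real dNLL_dsm = 0.5 * px_y * (n_dim/sm     - pownorm(zm,2)/(sm*sm));
    real dNLL_dsn = 0.5 * px_y * ((n-n_dim)/sn - pownorm(zn,2)/(sn*sn));
    Mat dNLL_dF(n_dim, n);
    externalProduct(dNLL_dF, w, zn);   // w z_n'
    dNLL_dF *= px_y * (1/sm - 1/sn);   // dNLL/dF = p(x|y) (1/sm - 1/sn) w z_n'
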
*/

void GaussianContinuumDistribution::declareOptions(OptionList& ol)
{
  // ### Declare all of this object's options here
  // ### For the "flags" of each option, you should typically specify  
  // ### one of OptionBase::buildoption, OptionBase::learntoption or 
  // ### OptionBase::tuningoption. Another possible flag to be combined with
  // ### is OptionBase::nosave

  declareOption(ol, "weight_mu_and_tangent", &GaussianContinuumDistribution::weight_mu_and_tangent, OptionBase::buildoption,
                "Weight of the cost on the scalar product between the manifold directions and mu.\n"
                );

  declareOption(ol, "include_current_point", &GaussianContinuumDistribution::include_current_point, OptionBase::buildoption,
                "Indication that the current point should be included in the nearest neighbors.\n"
                );

  declareOption(ol, "n_neighbors", &GaussianContinuumDistribution::n_neighbors, OptionBase::buildoption,
                "Number of nearest neighbors to consider for gradient descent.\n"
                );

  declareOption(ol, "n_neighbors_density", &GaussianContinuumDistribution::n_neighbors_density, OptionBase::buildoption,
                "Number of nearest neighbors to consider for p(x) density estimation.\n"
                );

  declareOption(ol, "mu_n_neighbors", &GaussianContinuumDistribution::mu_n_neighbors, OptionBase::buildoption,
                "Number of nearest neighbors to learn the mus (if < 0, mu_n_neighbors = n_neighbors).\n"
                );

  declareOption(ol, "n_dim", &GaussianContinuumDistribution::n_dim, OptionBase::buildoption,
                "Number of tangent vectors to predict.\n"
                );

  declareOption(ol, "update_parameters_every_n_epochs", &GaussianContinuumDistribution::update_parameters_every_n_epochs, OptionBase::buildoption,
                "Frequency of the update of the stored parameters of the reference set. \n"
                );

  declareOption(ol, "sigma_grad_scale_factor", &GaussianContinuumDistribution::sigma_grad_scale_factor, OptionBase::buildoption,
                "Scaling factor of the gradient on the sigmas. \n"
                );

  declareOption(ol, "optimizer", &GaussianContinuumDistribution::optimizer, OptionBase::buildoption,
                "Optimizer that optimizes the cost function.\n"
                );
                  
  declareOption(ol, "variances_transfer_function", &GaussianContinuumDistribution::variances_transfer_function, 
                OptionBase::buildoption,
                "Type of output transfer function for predicted variances, to force them to be >0:\n"
                "  square : take the square\n"
                "  exp : apply the exponential\n"
                "  softplus : apply the function log(1+exp(.))\n"
                );

  declareOption(ol, "architecture_type", &GaussianContinuumDistribution::architecture_type, OptionBase::buildoption,
                "For pre-defined tangent_predictor types: \n"
                "   single_neural_network : prediction = b + W*tanh(c + V*x), where W has n_hidden_units columns\n"
                "                          where the resulting vector is viewed as a n_dim by n matrix\n"
                "   embedding_neural_network: prediction[k,i] = d(e[k])/d(x[i]), where e(x) is an ordinary neural\n"
                "                             network representing the embedding function (see output_type option)\n"
                "where (b,W,c,V) are parameters to be optimized.\n"
                );

  declareOption(ol, "n_hidden_units", &GaussianContinuumDistribution::n_hidden_units, OptionBase::buildoption,
                "Number of hidden units (if architecture_type is some kind of neural network)\n"
                );
/*
  declareOption(ol, "output_type", &GaussianContinuumDistribution::output_type, OptionBase::buildoption,
                "Default value (the only one considered if architecture_type != embedding_*) is\n"
    "   tangent_plane: output the predicted tangent plane.\n"
    "   embedding: output the embedding vector (only if architecture_type == embedding_*).\n"
    "   tangent_plane+embedding: output both (in this order).\n"
                );
*/

  declareOption(ol, "batch_size", &GaussianContinuumDistribution::batch_size, OptionBase::buildoption, 
                "    how many samples to use to estimate the average gradient before updating the weights\n"
                "    0 is equivalent to specifying training_set->length() \n");

  declareOption(ol, "svd_threshold", &GaussianContinuumDistribution::svd_threshold, OptionBase::buildoption,
                "Threshold to accept singular values of F in solving for linear combination weights on tangent subspace.\n"
                );

  declareOption(ol, "sm_bigger_than_sn", &GaussianContinuumDistribution::sm_bigger_than_sn, OptionBase::buildoption,
                "Indication that sm should always be bigger than sn.\n"
                );

  declareOption(ol, "walk_on_noise", &GaussianContinuumDistribution::walk_on_noise, OptionBase::buildoption,
                "Indication that the random walk should also consider the noise variation.\n"
                );

  declareOption(ol, "parameters", &GaussianContinuumDistribution::parameters, OptionBase::learntoption,
                "Parameters of the tangent_predictor function.\n"
                );

  declareOption(ol, "Bs", &GaussianContinuumDistribution::Bs, OptionBase::learntoption,
                "The B matrices for the training set.\n"
                );

  declareOption(ol, "Fs", &GaussianContinuumDistribution::Fs, OptionBase::learntoption,
                "The F (tangent planes) matrices for the training set.\n"
                );

  declareOption(ol, "mus", &GaussianContinuumDistribution::mus, OptionBase::learntoption,
                "The mu vectors for the training set.\n"
                );

  declareOption(ol, "sms", &GaussianContinuumDistribution::sms, OptionBase::learntoption,
                "The sm values for the training set.\n"
                );
  
  declareOption(ol, "sns", &GaussianContinuumDistribution::sns, OptionBase::learntoption,
                "The sn values for the training set.\n"
                );

  declareOption(ol, "min_sigma", &GaussianContinuumDistribution::min_sigma, OptionBase::buildoption,
                "The minimum value for sigma noise and manifold.\n"
                );

  declareOption(ol, "min_diff", &GaussianContinuumDistribution::min_diff, OptionBase::buildoption,
                "The minimum value for the difference between sigma manifold and noise.\n"
                );

  declareOption(ol, "fixed_min_sigma", &GaussianContinuumDistribution::fixed_min_sigma, OptionBase::buildoption,
                "The fixed minimum value for sigma noise and manifold.\n"
                );

  declareOption(ol, "fixed_min_diff", &GaussianContinuumDistribution::fixed_min_diff, OptionBase::buildoption,
                "The fixed minimum value for the difference between sigma manifold and noise.\n"
                );

  declareOption(ol, "min_p_x", &GaussianContinuumDistribution::min_p_x, OptionBase::buildoption,
                "The minimum value for p_x, for stability concerns when doing gradient descent.\n"
                );

  declareOption(ol, "n_random_walk_step", &GaussianContinuumDistribution::n_random_walk_step, OptionBase::buildoption,
                "The number of random walk steps.\n"
                );

  declareOption(ol, "n_random_walk_per_point", &GaussianContinuumDistribution::n_random_walk_per_point, OptionBase::buildoption,
                "The number of random walks per training set point.\n"
                );

  declareOption(ol, "noise", &GaussianContinuumDistribution::noise, OptionBase::buildoption,
                "Noise parameter for the training data. For uniform noise, this gives half the length\n"
                "of the uniform window (centered around the origin), and for gaussian noise, this gives the variance of the noise in all directions.\n"
                );

  declareOption(ol, "noise_type", &GaussianContinuumDistribution::noise_type, OptionBase::buildoption,
                "Type of the noise (\"uniform\" or \"gaussian\").\n"
                );

  declareOption(ol, "use_noise", &GaussianContinuumDistribution::use_noise, OptionBase::buildoption,
                "Indication that the training should be done using noise on training data.\n"
                );

  declareOption(ol, "use_noise_direction", &GaussianContinuumDistribution::use_noise_direction, OptionBase::buildoption,
                "Indication that the noise should be directed in the noise directions.\n"
                );

  declareOption(ol, "random_walk_step_prop", &GaussianContinuumDistribution::random_walk_step_prop, OptionBase::buildoption,
                "Proportion or confidence of the random walk steps.\n"
                );

  declareOption(ol, "reference_set", &GaussianContinuumDistribution::reference_set, OptionBase::learntoption,
                "Reference points for density computation.\n"
                );

  // Now call the parent class' declareOptions
  inherited::declareOptions(ol);
}

void GaussianContinuumDistribution::build_()
{

  n = PLearner::inputsize_;

  if (n>0)
  {

    Var log_n_examples(1,1,"log(n_examples)");
    if(train_set)
      reference_set = train_set;

    {
      if (n_hidden_units <= 0)
        PLERROR("GaussianContinuumDistribution::build_: number of hidden units should be positive, but is %d\n",n_hidden_units);

      x = Var(n);
      c = Var(n_hidden_units,1,"c ");
      V = Var(n_hidden_units,n,"V ");               
      Var a = tanh(c + product(V,x));
      muV = Var(n,n_hidden_units,"muV "); 
      smV = Var(1,n_hidden_units,"smV ");  
      smb = Var(1,1,"smB ");
      snV = Var(1,n_hidden_units,"snV ");  
      snb = Var(1,1,"snB ");      

      if(architecture_type == "embedding_neural_network")
      {
        W = Var(n_dim,n_hidden_units,"W ");       
        tangent_plane = diagonalized_factors_product(W,1-a*a,V); 
        embedding = product(W,a);
        output_embedding = Func(x,embedding);
      } 
      else if(architecture_type == "single_neural_network")
      {
        b = Var(n_dim*n,1,"b");
        W = Var(n_dim*n,n_hidden_units,"W ");
        tangent_plane = reshape(b + product(W,a),n_dim,n);
      }
      else
        PLERROR("GaussianContinuumDistribution::build_, unknown architecture_type option %s",
                architecture_type.c_str());
     
      mu = product(muV,a); 
      fixed_min_sig = new SourceVariable(1,1);
      fixed_min_sig->value[0] = fixed_min_sigma;
      min_sig = Var(1,1);
      min_sig->setName("min_sig");
      fixed_min_d = new SourceVariable(1,1);
      fixed_min_d->value[0] = fixed_min_diff;
      min_d = Var(1,1);
      min_d->setName("min_d");
      if(noise > 0)
      {
        if(noise_type == "uniform")
        {
          PP<UniformDistribution> temp = new UniformDistribution();
          Vec lower_noise(n);
          Vec upper_noise(n);
          for(int i=0; i<n; i++)
          {
            lower_noise[i] = -1*noise;
            upper_noise[i] = noise;
          }
          temp->min = lower_noise;
          temp->max = upper_noise;
          dist = temp;
        }
        else if(noise_type == "gaussian")
        {
          PP<GaussianDistribution> temp = new GaussianDistribution();
          Vec mu(n); mu.clear();
          Vec eig_values(n); 
          Mat eig_vectors(n,n); eig_vectors.clear();
          for(int i=0; i<n; i++)
          {
            eig_values[i] = noise; // maybe should be adjusted to the sigma noise at the input
            eig_vectors(i,i) = 1.0;
          }
          temp->mu = mu;
          temp->eigenvalues = eig_values;
          temp->eigenvectors = eig_vectors;
          dist = temp;
        }
        else PLERROR("In GaussianContinuumDistribution::build_() : noise_type %s not defined",noise_type.c_str());
        noise_var = new PDistributionVariable(x,dist);
        if(use_noise_direction)
        {
          for(int k=0; k<n_dim; k++)
          {
            Var index_var = new SourceVariable(1,1);
            index_var->value[0] = k;
            Var f_k = new VarRowVariable(tangent_plane,index_var);
            noise_var = noise_var - product(f_k,noise_var)* transpose(f_k)/pownorm(f_k,2);
          }
        }
        noise_var = no_bprop(noise_var);
        noise_var->setName(noise_type);
      }
      else
      {
        noise_var = new SourceVariable(n,1);
        noise_var->setName("no noise");
        for(int i=0; i<n; i++)
          noise_var->value[i] = 0;
      }

      // Path for noisy mu
      Var a_noisy = tanh(c + product(V,x+noise_var));
      mu_noisy = product(muV,a_noisy); 

      if(sm_bigger_than_sn)
      {
        if(variances_transfer_function == "softplus") sn = softplus(snb + product(snV,a))  + min_sig + fixed_min_sig;
        else if(variances_transfer_function == "square") sn = square(snb + product(snV,a)) + min_sig + fixed_min_sig;
        else if(variances_transfer_function == "exp") sn = exp(snb + product(snV,a)) + min_sig + fixed_min_sig;
        else PLERROR("In GaussianContinuumDistribution::build_ : unknown variances_transfer_function option %s ", variances_transfer_function.c_str());
        Var diff;
        
        if(variances_transfer_function == "softplus") diff = softplus(smb + product(smV,a)) + min_d + fixed_min_d;
        else if(variances_transfer_function == "square") diff = square(smb + product(smV,a)) + min_d + fixed_min_d;
        else if(variances_transfer_function == "exp") diff = exp(smb + product(smV,a)) + min_d + fixed_min_d;
        sm = sn + diff;
      }
      else
      {
        if(variances_transfer_function == "softplus"){
          sm = softplus(smb + product(smV,a)) + min_sig + fixed_min_sig; 
          sn = softplus(snb + product(snV,a)) + min_sig + fixed_min_sig;
        }
        else if(variances_transfer_function == "square"){
          sm = square(smb + product(smV,a)) + min_sig + fixed_min_sig; 
          sn = square(snb + product(snV,a)) + min_sig + fixed_min_sig;
        }
        else if(variances_transfer_function == "exp"){
          sm = exp(smb + product(smV,a)) + min_sig + fixed_min_sig; 
          sn = exp(snb + product(snV,a)) + min_sig + fixed_min_sig;
        }
        else PLERROR("In GaussianContinuumDistribution::build_ : unknown variances_transfer_function option %s ", variances_transfer_function.c_str());
      }
      
      if(sigma_grad_scale_factor > 0)
      {
        //sm = no_bprop(sm,sigma_grad_scale_factor);
        //sn = no_bprop(sn,sigma_grad_scale_factor);
        sn = threshold_bprop(sn,sigma_grad_scale_factor);
      }

      mu_noisy->setName("mu_noisy ");
      tangent_plane->setName("tangent_plane ");
      mu->setName("mu ");
      sm->setName("sm ");
      sn->setName("sn ");
      a_noisy->setName("a_noisy ");
      a->setName("a ");
      if(architecture_type == "embedding_neural_network")
        embedding->setName("embedding ");
      x->setName("x ");

      if(architecture_type == "embedding_neural_network")
        predictor = Func(x, W & c & V & muV & smV & smb & snV & snb & min_sig & min_d, tangent_plane & mu & sm & sn );
      if(architecture_type == "single_neural_network")
        predictor = Func(x, b & W & c & V & muV & smV & smb & snV & snb & min_sig & min_d, tangent_plane & mu & sm & sn );

      output_f_all = Func(x,tangent_plane & mu & sm & sn);
    }

    if (parameters.size()>0 && parameters.nelems() == predictor->parameters.nelems())
      predictor->parameters.copyValuesFrom(parameters);
    parameters.resize(predictor->parameters.size());
    for (int i=0;i<parameters.size();i++)
      parameters[i] = predictor->parameters[i];

    Var target_index = Var(1,1);
    target_index->setName("target_index");
    Var neighbor_indexes = Var(n_neighbors,1);
    neighbor_indexes->setName("neighbor_indexes");
    p_x = Var(reference_set->length(),1);
    p_x->setName("p_x");
    for(int i=0; i<p_x.length(); i++)
      p_x->value[i] = MISSING_VALUE;

    //p_target = new VarRowsVariable(p_x,target_index);
    p_target = new SourceVariable(1,1);
    p_target->value[0] = log(1.0/reference_set->length());
    p_target->setName("p_target");
    p_neighbors = new VarRowsVariable(p_x,neighbor_indexes);
    p_neighbors->setName("p_neighbors");

    tangent_targets = Var(n_neighbors,n);
    if(include_current_point)
    {
      Var temp = new SourceVariable(1,n);
      temp->value.fill(0);
      tangent_targets_and_point = vconcat(temp & tangent_targets);
      p_neighbors_and_point = vconcat(p_target & p_neighbors);
    }
    else
    {
      tangent_targets_and_point = tangent_targets;
      p_neighbors_and_point = p_neighbors;
    }
    
    if(mu_n_neighbors < 0 ) mu_n_neighbors = n_neighbors;

    // compute - log ( sum_{neighbors of x} P(neighbor|x) ) according to semi-spherical model
    Var nll = nll_semispherical_gaussian(tangent_plane, mu, sm, sn, tangent_targets_and_point, p_target, p_neighbors_and_point, noise_var, mu_noisy,
                                         use_noise, svd_threshold, min_p_x, mu_n_neighbors); // + log_n_examples;
    //nll_f = Func(tangent_plane & mu & sm & sn & tangent_targets, nll);
    Var knn = new SourceVariable(1,1);
    knn->setName("knn");
    knn->value[0] = n_neighbors + (include_current_point ? 1 : 0);

    if(weight_mu_and_tangent != 0)
    {
      sum_nll = new ColumnSumVariable(nll) / knn + weight_mu_and_tangent * ((Var) new RowSumVariable(square(product(no_bprop(tangent_plane),mu_noisy))));
    }
    else
      sum_nll = new ColumnSumVariable(nll) / knn;

    cost_of_one_example = Func(x & tangent_targets & target_index & neighbor_indexes, predictor->parameters, sum_nll);
    noisy_data = Func(x,x + noise_var);    // Func to inspect what the noisy data looks like (doesn't work so far; this problem will be investigated)
    //verify_gradient_func = Func(predictor->inputs & tangent_targets & target_index & neighbor_indexes, predictor->parameters & mu_noisy, sum_nll);  

    if(n_neighbors_density > reference_set.length()-!include_current_point || n_neighbors_density < 0) n_neighbors_density = reference_set.length() - !include_current_point;

    train_nearest_neighbors.resize(reference_set.length(), n_neighbors_density-1);

    t_row.resize(n);
    Ut_svd.resize(n,n);
    V_svd.resize(n_dim,n_dim);
    z.resize(n);
    zm.resize(n);
    zn.resize(n);
    x_minus_neighbor.resize(n);
    neighbor_row.resize(n);
    w.resize(n_dim);

    Bs.resize(reference_set.length());
    Fs.resize(reference_set.length());
    mus.resize(reference_set.length(), n);
    sms.resize(reference_set.length());
    sns.resize(reference_set.length());
    
  }

}

void GaussianContinuumDistribution::update_reference_set_parameters()
{
  // Compute Fs, Bs, mus, sms, sns
  Bs.resize(reference_set.length());
  Fs.resize(reference_set.length());
  mus.resize(reference_set.length(), n);
  sms.resize(reference_set.length());
  sns.resize(reference_set.length());
  
  for(int t=0; t<reference_set.length(); t++)
  {
    Fs[t].resize(tangent_plane.length(), tangent_plane.width());
    reference_set->getRow(t,t_row);
    predictor->fprop(t_row, Fs[t].toVec() & mus(t) & sms.subVec(t,1) & sns.subVec(t,1));
    
    // computing B

    static Mat F_copy;
    F_copy.resize(Fs[t].length(),Fs[t].width());
    F_copy << Fs[t];
    // N.B. this is the SVD of F'
    lapackSVD(F_copy, Ut_svd, S_svd, V_svd,'A',1.5);
    Bs[t].resize(n_dim,reference_set.width());
    Bs[t].clear();
    for (int k=0;k<S_svd.length();k++)
    {
      real s_k = S_svd[k];
      if (s_k>svd_threshold) // ignore the components that have too small singular value (more robust solution)
      { 
        real coef = 1/s_k;
        for (int i=0;i<n_dim;i++)
        {
          real* Bi = Bs[t][i];
          for (int j=0;j<n;j++)
            Bi[j] += V_svd(i,k)*Ut_svd(k,j)*coef;
        }
      }
    }
    
  }
/*
  for(int t=0; t<train_set.length(); t++)
  {
    //train_set->getRow(t,t_row);
    p_x->value[t] = log_density(t);
    //p_x->value[t] = exp(log_density(t));
  }
*/
}

void GaussianContinuumDistribution::knn(const VMat& vm, const Vec& x, const int& k, TVec<int>& neighbors, bool sortk) const
{
  int n = vm->length();
  distances.resize(n,2);
  distances.column(1) << Vec(0, n-1, 1); 
  dk.setDataForKernelMatrix(vm);
  t_dist.resize(n);
  dk.evaluate_all_i_x(x, t_dist);
  distances.column(0) << t_dist;
  partialSortRows(distances, k, sortk);
  neighbors.resize(k);
  for (int i = 0, j=0; i < k  && j<n; j++)
  {
    real d = distances(j,0);
    if (include_current_point || d>0)  // skip the point itself (d==0) unless include_current_point; yuck, ugly hack!
    {
      neighbors[i] = int(distances(j,1));
      i++;
    }
  }
}

void GaussianContinuumDistribution::make_random_walk()
{
  if(n_random_walk_step < 1) PLERROR("Number of steps in the random walk should be at least one");
  if(n_random_walk_per_point < 1) PLERROR("Number of random walks per training set point should be at least one");
  ith_step_generated_set.resize(n_random_walk_step);

  Mat generated_set(train_set.length()*n_random_walk_per_point,n);
  for(int t=0; t<train_set.length(); t++)
  {
    train_set->getRow(t,t_row);
    output_f_all(t_row);
      
    real this_sm = sm->value[0];
    real this_sn = sn->value[0];
    Vec this_mu(n); this_mu << mu->value;
    static Mat this_F(n_dim,n); this_F << tangent_plane->matValue;
      
    // N.B. this is the SVD of F'
    lapackSVD(this_F, Ut_svd, S_svd, V_svd,'A',1.5);

    for(int rwp=0; rwp<n_random_walk_per_point; rwp++)
    {
      TVec<real> z_m(n_dim);
      TVec<real> z(n);
      for(int i=0; i<n_dim; i++)
        z_m[i] = normal_sample();
      for(int i=0; i<n; i++)
        z[i] = normal_sample();

      Vec new_point = generated_set(t*n_random_walk_per_point+rwp);
      for(int j=0; j<n; j++)
      {
        new_point[j] = 0;         
        for(int k=0; k<n_dim; k++)
          new_point[j] += Ut_svd(k,j)*z_m[k];
        new_point[j] *= sqrt(this_sm-this_sn);
        if(walk_on_noise)
          new_point[j] += z[j]*sqrt(this_sn);
      }
      new_point *= random_walk_step_prop;
      new_point += this_mu + t_row;
    }
  }

  // Test of generation of random points
  /*
  int n_test_gen_points = 3;
  int n_test_gen_generated = 30;

  Mat test_gen(n_test_gen_points*n_test_gen_generated,n);
  for(int p=0; p<n_test_gen_points; p++)
  {
    for(int t=0; t<n_test_gen_generated; t++)             
    {
      valid_set->getRow(p,t_row);
      output_f_all(t_row);
      
      real this_sm = sm->value[0];
      real this_sn = sn->value[0];
      Vec this_mu(n); this_mu << mu->value;
      static Mat this_F(n_dim,n); this_F << tangent_plane->matValue;
      
      // N.B. this is the SVD of F'
      lapackSVD(this_F, Ut_svd, S_svd, V_svd);      

      TVec<real> z_m(n_dim);
      TVec<real> z(n);
      for(int i=0; i<n_dim; i++)
        z_m[i] = normal_sample();
      for(int i=0; i<n; i++)
        z[i] = normal_sample();

      Vec new_point = test_gen(p*n_test_gen_generated+t);
      for(int j=0; j<n; j++)
      {
        new_point[j] = 0;         
        for(int k=0; k<n_dim; k++)
          new_point[j] += Ut_svd(k,j)*z_m[k];
        new_point[j] *= sqrt(this_sm-this_sn);
        if(walk_on_noise)
          new_point[j] += z[j]*sqrt(this_sn);
      }
      new_point += this_mu + t_row;
    }
  }
  
  PLearn::save("test_gen.psave",test_gen);
  */
  //PLearn::save("gen_points_0.psave",generated_set);
  ith_step_generated_set[0] = VMat(generated_set);
  
  for(int step=1; step<n_random_walk_step; step++)
  {
    Mat generated_set(ith_step_generated_set[step-1].length(),n);
    for(int t=0; t<ith_step_generated_set[step-1].length(); t++)
    {
      ith_step_generated_set[step-1]->getRow(t,t_row);
      output_f_all(t_row);
      
      real this_sm = sm->value[0];
      real this_sn = sn->value[0];
      Vec this_mu(n); this_mu << mu->value;
      static Mat this_F(n_dim,n); this_F << tangent_plane->matValue;
      
      // N.B. this is the SVD of F'
      lapackSVD(this_F, Ut_svd, S_svd, V_svd,'A',1.5);
      
      TVec<real> z_m(n_dim);
      TVec<real> z(n);
      for(int i=0; i<n_dim; i++)
        z_m[i] = normal_sample();
      for(int i=0; i<n; i++)
        z[i] = normal_sample();
      
      Vec new_point = generated_set(t);
      for(int j=0; j<n; j++)
      {
        new_point[j] = 0;
        for(int k=0; k<n_dim; k++)
          if(S_svd[k] > svd_threshold)
            new_point[j] += Ut_svd(k,j)*z_m[k];
        new_point[j] *= sqrt(this_sm-this_sn);
        if(walk_on_noise)
          new_point[j] += z[j]*sqrt(this_sn);
      }
      new_point *= random_walk_step_prop;
      new_point += this_mu + t_row;
    
    }
    /*
    string path = " ";
    if(step == n_random_walk_step-1)
      path = "gen_points_last.psave";
    else
      path = "gen_points_" + tostring(step) + ".psave";
    
    PLearn::save(path,generated_set);
    */
    ith_step_generated_set[step] = VMat(generated_set);
  }

  reference_set = vconcat(train_set & ith_step_generated_set);

  // Single random walk
  /*
  Mat single_walk_set(100,n);
  train_set->getRow(train_set.length()-1,single_walk_set(0));
  for(int step=1; step<100; step++)
  {
    t_row << single_walk_set(step-1);
    output_f_all(t_row);
      
    real this_sm = sm->value[0];
    real this_sn = sn->value[0];
    Vec this_mu(n); this_mu << mu->value;
    static Mat this_F(n_dim,n); this_F << tangent_plane->matValue;
    
    // N.B. this is the SVD of F'
    lapackSVD(this_F, Ut_svd, S_svd, V_svd);
    
    TVec<real> z_m(n_dim);
    TVec<real> z(n);
    for(int i=0; i<n_dim; i++)
      z_m[i] = normal_sample();
    for(int i=0; i<n; i++)
      z[i] = normal_sample();
    
    Vec new_point = single_walk_set(step);
    for(int j=0; j<n; j++)
    {
      new_point[j] = 0;
      for(int k=0; k<n_dim; k++)
        if(S_svd[k] > svd_threshold)
          new_point[j] += Ut_svd(k,j)*z_m[k];
      new_point[j] *= sqrt(this_sm-this_sn);
      if(walk_on_noise)
        new_point[j] += z[j]*sqrt(this_sn);
    }
    new_point *= random_walk_step_prop;
    new_point += this_mu + t_row;
  }
  PLearn::save("image_single_rw.psave",single_walk_set);
  */
}

void GaussianContinuumDistribution::compute_train_and_validation_costs()
{
  update_reference_set_parameters();

  // estimate p(x) for the training set
  /*
  real nll_train = 0;

  for(int t=0; t<train_set.length(); t++)
  {

    train_set->getRow(t,t_row);
    p_x->value[t] = 0;
    // fetching nearest neighbors for density estimation
    for(int neighbor=0; neighbor<train_nearest_neighbors.width(); neighbor++)
    {
      train_set->getRow(train_nearest_neighbors(t,neighbor),neighbor_row);
      substract(t_row,neighbor_row,x_minus_neighbor);
      substract(x_minus_neighbor,mus(train_nearest_neighbors(t,neighbor)),z);
      product(w, Bs[train_nearest_neighbors(t,neighbor)], z);
      transposeProduct(zm, Fs[train_nearest_neighbors(t,neighbor)], w);
      substract(z,zm,zn);
      p_x->value[t] += exp(-0.5*(pownorm(zm,2)/sms[train_nearest_neighbors(t,neighbor)] + pownorm(zn,2)/sns[train_nearest_neighbors(t,neighbor)] 
                         + n_dim*log(sms[train_nearest_neighbors(t,neighbor)]) + (n-n_dim)*log(sns[train_nearest_neighbors(t,neighbor)])) - n/2.0 * Log2Pi);
    }
    p_x->value[t] /= train_set.length();
    nll_train -= log(p_x->value[t]);

  }

  nll_train /= train_set.length();

  if(verbosity > 2) cout << "NLL train = " << nll_train << endl;

  // estimate p(x) for the validation set

  real nll_validation = 0;

  for(int t=0; t<valid_set.length(); t++)
  {

    valid_set->getRow(t,t_row);
    real this_p_x = 0;
    // fetching nearest neighbors for density estimation
    for(int neighbor=0; neighbor<n_neighbors_density; neighbor++)
    {
      train_set->getRow(validation_nearest_neighbors(t,neighbor), neighbor_row);
      substract(t_row,neighbor_row,x_minus_neighbor);
      substract(x_minus_neighbor,mus(validation_nearest_neighbors(t,neighbor)),z);
      product(w, Bs[validation_nearest_neighbors(t,neighbor)], z);
      transposeProduct(zm, Fs[validation_nearest_neighbors(t,neighbor)], w);
      substract(z,zm,zn);
      this_p_x += exp(-0.5*(pownorm(zm,2)/sms[validation_nearest_neighbors(t,neighbor)] + pownorm(zn,2)/sns[validation_nearest_neighbors(t,neighbor)] 
                         + n_dim*log(sms[validation_nearest_neighbors(t,neighbor)]) + (n-n_dim)*log(sns[validation_nearest_neighbors(t,neighbor)])) - n/2.0 * Log2Pi);
    }

    this_p_x /= train_set.length();  // When points will be added using a random walk, this will need to be changed (among other things...)
    nll_validation -= log(this_p_x);
  }

  nll_validation /= valid_set.length();

  if(verbosity > 2) cout << "NLL validation = " << nll_validation << endl;
  */
}

// ### Nothing to add here, simply calls build_
void GaussianContinuumDistribution::build()
{
  inherited::build();
  build_();
}

#ifdef __INTEL_COMPILER
#pragma warning(disable:1419)  // Get rid of compiler warning.
#endif
extern void varDeepCopyField(Var& field, CopiesMap& copies);
#ifdef __INTEL_COMPILER
#pragma warning(default:1419)
#endif

01117 void GaussianContinuumDistribution::makeDeepCopyFromShallowCopy(CopiesMap& copies)
01118 {  inherited::makeDeepCopyFromShallowCopy(copies);
01119 
01120   deepCopyField(cost_of_one_example, copies);
01121   deepCopyField(reference_set,copies);
01122   varDeepCopyField(x, copies);
01123   varDeepCopyField(noise_var, copies);  
01124   varDeepCopyField(b, copies);
01125   varDeepCopyField(W, copies);
01126   varDeepCopyField(c, copies);
01127   varDeepCopyField(V, copies);
01128   varDeepCopyField(tangent_targets, copies);
01129   varDeepCopyField(muV, copies);
01130   varDeepCopyField(smV, copies);
01131   varDeepCopyField(smb, copies);
01132   varDeepCopyField(snV, copies);
01133   varDeepCopyField(snb, copies);
01134   varDeepCopyField(mu, copies);
01135   varDeepCopyField(sm, copies);
01136   varDeepCopyField(sn, copies);
01137   varDeepCopyField(mu_noisy, copies);
01138   varDeepCopyField(tangent_plane, copies);
01139   varDeepCopyField(tangent_targets_and_point, copies);
01140   varDeepCopyField(sum_nll, copies);
01141 //  varDeepCopyField(min_sig, copies);
01142 //  varDeepCopyField(min_d, copies);
01143   varDeepCopyField(embedding, copies);
01144 
01145   deepCopyField(dist, copies);
01146   deepCopyField(ith_step_generated_set, copies);
01147   deepCopyField(train_nearest_neighbors, copies);
01148 
01149   deepCopyField(Bs, copies);
01150   deepCopyField(Fs, copies);
01151   deepCopyField(mus, copies);
01152   deepCopyField(sms, copies);
01153   deepCopyField(sns, copies);
01154   deepCopyField(Ut_svd, copies);
01155   deepCopyField(V_svd, copies);
01156   deepCopyField(S_svd, copies);
01157   //deepCopyField(dk, copies);
01158 
01159   deepCopyField(parameters, copies);
01160   deepCopyField(optimizer, copies);
01161   deepCopyField(predictor, copies);
01162   deepCopyField(output_f, copies);
01163   deepCopyField(output_f_all, copies);
01164   deepCopyField(projection_error_f, copies);
01165   deepCopyField(noisy_data, copies);
01166   deepCopyField(output_embedding, copies);
01167 
01168   // TODO: note that this deep-copy list is not yet complete!
01169   deepCopyField(log_gauss, copies);
01170   deepCopyField(w_mat, copies);
01171 }
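// A minimal usage sketch (hypothetical 'learner' variable; assumes PLearn's
// usual Object::deepCopy(CopiesMap&) entry point): starting from a fresh
// CopiesMap guarantees that each Var shared among the fields above is
// duplicated exactly once across the whole object graph.
//
//   CopiesMap copies;
//   GaussianContinuumDistribution* clone =
//     static_cast<GaussianContinuumDistribution*>(learner->deepCopy(copies));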
01172 
01173 
01174 void GaussianContinuumDistribution::forget()
01175 {
01176   if (train_set) initializeParams();
01177   stage = 0;
01178 }
01179     
01180 void GaussianContinuumDistribution::train()
01181 {
01182 
01183   // Set train_stats if not already done.
01184   if (!train_stats)
01185     train_stats = new VecStatsCollector();
01186 
01187   // find nearest neighbors...
01188 
01189   // ... on the training set
01190   
01191   if(stage == 0)
01192     for(int t=0; t<train_set.length(); t++)
01193     {
01194       train_set->getRow(t,t_row);
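      // train_nearest_neighbors(t) is a view on row t (TMat rows share
      // storage), so computeNearestNeighbors fills the matrix in place;
      // the trailing argument t keeps the point out of its own neighbor
      // list.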
01195       TVec<int> nn = train_nearest_neighbors(t);
01196       computeNearestNeighbors(train_set, t_row, nn, t);
01197     }
01198   
01199   VMat train_set_with_targets;
01200   VMat targets_vmat;
01201   if (!cost_of_one_example)
01202     PLERROR("GaussianContinuumDistribution::train: build has not been run after setTrainingSet!");
01203 
01204   targets_vmat = local_neighbors_differences(train_set, n_neighbors, false, true);
01205 
01206   train_set_with_targets = hconcat(train_set, targets_vmat);
01207   train_set_with_targets->defineSizes(inputsize()+inputsize()*n_neighbors+1+n_neighbors,0);
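  // The concatenated VMat is all input: x itself (inputsize() columns), the
  // n_neighbors difference vectors (inputsize()*n_neighbors columns), plus,
  // apparently appended by local_neighbors_differences, one column for the
  // point's own index and n_neighbors columns of neighbor indices.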
01208   int l = train_set->length();  
01209   //log_n_examples->value[0] = log(real(l));
01210   int nsamples = batch_size>0 ? batch_size : l;
01211 
01212   Var totalcost = meanOf(train_set_with_targets, cost_of_one_example, nsamples);
01213 
01214   if(optimizer)
01215     {
01216       optimizer->setToOptimize(parameters, totalcost);  
01217       optimizer->build();
01218     }
01219   else PLERROR("GaussianContinuumDistribution::train can't train without setting an optimizer first!");
01220   
01221   // number of optimizer stages corresponding to one learner stage (one epoch)
01222   int optstage_per_lstage = l/nsamples;
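  // e.g. with l = 10000 training points and batch_size = 20, one learner
  // stage (one epoch) corresponds to 10000/20 = 500 optimizer stages.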
01223 
01224   ProgressBar* pb = 0;
01225   if(report_progress>0)
01226     pb = new ProgressBar("Training GaussianContinuumDistribution from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);
01227 
01228   t_row.resize(train_set.width());
01229 
01230   int initial_stage = stage;
01231   bool early_stop=false;
01232   while(stage<nstages && !early_stop)
01233     {
01234       optimizer->nstages = optstage_per_lstage;
01235       train_stats->forget();
01236       optimizer->early_stop = false;
01237       optimizer->optimizeN(*train_stats);
01238       train_stats->finalize();
01239       if(verbosity>2)
01240         cout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
01241       ++stage;
01242       if(pb)
01243         pb->update(stage-initial_stage);
01244       
01245       if(stage != 0 && stage%update_parameters_every_n_epochs == 0)
01246       {
01247         compute_train_and_validation_costs();
01248       }
01249     }
01250   if(verbosity>1)
01251     cout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;
01252 
01253   if(pb)
01254     delete pb;
01255   
01256   update_reference_set_parameters();
01257 
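  // If requested, grow the reference set with points generated by a random
  // walk on the learned manifold (see make_random_walk), then refresh the
  // per-point Gaussian parameters for the enlarged set.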
01258   if(n_random_walk_step > 0)
01259   {
01260     make_random_walk();
01261     update_reference_set_parameters();
01262   }
01263 }
01264 
01265 //////////////////////
01266 // initializeParams //
01267 //////////////////////
01268 void GaussianContinuumDistribution::initializeParams()
01269 {
01270   if (seed_>=0)
01271     manual_seed(seed_);
01272   else
01273     PLearn::seed();
01274 
01275   if (architecture_type=="embedding_neural_network")
01276   {
01277     real delta = 1.0 / sqrt(real(inputsize()));
01278     fill_random_uniform(V->value, -delta, delta);
01279     delta = 1.0 / real(n_hidden_units);
01280     fill_random_uniform(W->matValue, -delta, delta);
01281     c->value.clear();
01282     fill_random_uniform(smV->matValue, -delta, delta);
01283     smb->value.clear();
01285     snb->value.clear();
01286     fill_random_uniform(snV->matValue, -delta, delta);
01287     fill_random_uniform(muV->matValue, -delta, delta);
01288     min_sig->value[0] = min_sigma;
01289     min_d->value[0] = min_diff;
01290   }
01291   else if (architecture_type=="single_neural_network")
01292   {
01293     real delta = 1.0 / sqrt(real(inputsize()));
01294     fill_random_uniform(V->value, -delta, delta);
01295     delta = 1.0 / real(n_hidden_units);
01296     fill_random_uniform(W->matValue, -delta, delta);
01297     c->value.clear();
01298     fill_random_uniform(smV->matValue, -delta, delta);
01299     smb->value.clear();
01301     snb->value.clear();
01302     fill_random_uniform(snV->matValue, -delta, delta);
01303     fill_random_uniform(muV->matValue, -delta, delta);
01304     b->value.clear();
01305     min_sig->value[0] = min_sigma;
01306     min_d->value[0] = min_diff;
01307   }
01308   else PLERROR("GaussianContinuumDistribution::initializeParams: architecture_type '%s' not handled yet!", architecture_type.c_str());
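  // Scaling note: weights reading the input are drawn uniformly from
  // [-1/sqrt(inputsize()), 1/sqrt(inputsize())], while weights fed by the
  // hidden layer use [-1/n_hidden_units, 1/n_hidden_units]; e.g. with
  // inputsize() = 100 and n_hidden_units = 10, both deltas equal 0.1.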
01309   
01310   for(int i=0; i<p_x.length(); i++)
01311     //p_x->value[i] = log(1.0/p_x.length());
01312     p_x->value[i] = MISSING_VALUE;
01313   if(optimizer)
01314     optimizer->reset();
01315 }
01316 
01317 /////////////////
01318 // log_density //
01319 /////////////////
01320 real GaussianContinuumDistribution::log_density(const Vec& x) const {
01321   // Compute log-density.
01322 
01323   // Fetching nearest neighbors for density estimation.
01324   knn(reference_set,x,n_neighbors_density,t_nn,false);
01325   w_mat.resize(t_nn.length(), w.length());
01326   Vec w_vec;
01327   t_row << x;
01328   log_gauss.resize(t_nn.length());
01329   real log_ref_set = log((real)reference_set.length());
01330   for(int neighbor=0; neighbor<t_nn.length(); neighbor++)
01331   {
01332     w_vec = w_mat(neighbor);
01333     reference_set->getRow(t_nn[neighbor],neighbor_row);
01334     substract(t_row,neighbor_row,x_minus_neighbor);
01335     substract(x_minus_neighbor,mus(t_nn[neighbor]),z);
01336     product(w_vec, Bs[t_nn[neighbor]], z);
01337     transposeProduct(zm, Fs[t_nn[neighbor]], w_vec);
01338     substract(z,zm,zn);
01339     log_gauss[neighbor] = -0.5*(pownorm(zm,2)/sms[t_nn[neighbor]] + pownorm(zn,2)/sns[t_nn[neighbor]] 
01340                                 + n_dim*log(sms[t_nn[neighbor]]) + (n-n_dim)*log(sns[t_nn[neighbor]])) - n/2.0 * Log2Pi - log_ref_set;
01341   }
01342   
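  // logadd returns log(sum_j exp(log_gauss[j])) computed stably
  // (log-sum-exp), so the result is the log of a uniform mixture of the
  // per-neighbor Gaussians, each term already carrying its -log(N)
  // normalization from above.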
01343   return logadd(log_gauss);
01344 }
01345 
01346 real GaussianContinuumDistribution::log_density(int i) {
01347   // Compute the leave-one-out log-density of reference point i. Note that
01348   // each component below still carries the full -log(N) normalization.
01349   // fetching nearest neighbors for density estimation
01350   //knn(reference_set,x,n_neighbors_density,t_nn,bool(0));
01351   //t_row << x;
01352   reference_set->getRow(i,t_row);
01353   int skipped = 0; // becomes 1 once point i itself has been passed over
01354   log_gauss.resize(reference_set.length()-1);
01355   real log_ref_set = log((real)reference_set.length());
01356   for(int neighbor=0; neighbor<reference_set.length(); neighbor++)
01357   {
01358     if(neighbor == i) 
01359     {
01360       skipped = 1;
01361       continue;
01362     }
01363     reference_set->getRow(neighbor,neighbor_row);
01364     substract(t_row,neighbor_row,x_minus_neighbor);
01365     substract(x_minus_neighbor,mus(neighbor),z);
01366     product(w, Bs[neighbor], z);
01367     transposeProduct(zm, Fs[neighbor], w);
01368     substract(z,zm,zn);
01369     log_gauss[neighbor-skipped] = -0.5*(pownorm(zm,2)/sms[neighbor] + pownorm(zn,2)/sns[neighbor] 
01370                                 + n_dim*log(sms[neighbor]) + (n-n_dim)*log(sns[neighbor])) - n/2.0 * Log2Pi - log_ref_set;
01371   }
01372   
01373   return logadd(log_gauss);
01374 }
01375 
01376 /////////////////////
01377 // getEigenvectors //
01378 /////////////////////
01379 Mat GaussianContinuumDistribution::getEigenvectors(int j) const {
01380   return Bs[j];
01381 }
01382 
01383 Vec GaussianContinuumDistribution::getTrainPoint(int j) const {
01384   Vec ret(reference_set->width());
01385   reference_set->getRow(j,ret);
01386   return ret;
01387 }
01388 
01389 ///////////////////
01390 // computeOutput //
01391 ///////////////////
01392 void GaussianContinuumDistribution::computeOutput(const Vec& input, Vec& output) const
01393 {
01394   switch(outputs_def[0])
01395   {
01396   case 'm':
01397     output_embedding(input);
01398     output << embedding->value;
01399     break;
01400   default:
01401     inherited::computeOutput(input,output);
01402   }
01403 }
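// Minimal usage sketch (hypothetical learner and data; assumes outputs_def
// starts with 'm', which selects the manifold embedding):
//
//   Vec input(learner->inputsize());    // a test point
//   Vec output(learner->outputsize());  // receives the n_dim embedding
//   learner->computeOutput(input, output);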
01404 
01405 ////////////////
01406 // outputsize //
01407 ////////////////
01408 int GaussianContinuumDistribution::outputsize() const
01409 {
01410   switch(outputs_def[0])
01411   {
01412   case 'm':
01413     return n_dim;
01415   default:
01416     return inherited::outputsize();
01417   }
01418 }
01419 
01420 } // end of namespace PLearn
01421 
01422 
01423 /*
01424   Local Variables:
01425   mode:c++
01426   c-basic-offset:4
01427   c-file-style:"stroustrup"
01428   c-file-offsets:((innamespace . 0)(inline-open . 0))
01429   indent-tabs-mode:nil
01430   fill-column:79
01431   End:
01432 */
01433 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :