// -*- C++ -*-

// GaussianContinuumDistribution.h
//
// Copyright (C) 2004 Yoshua Bengio & Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: GaussianContinuumDistribution.h 9418 2008-09-02 15:33:46Z nouiz $
 ******************************************************* */

// Authors: Yoshua Bengio & Martin Monperrus

#ifndef GaussianContinuumDistribution_INC
#define GaussianContinuumDistribution_INC

#include "UnconditionalDistribution.h"
#include <plearn/io/PStream.h>
#include <plearn_learners/generic/PLearner.h>
#include <plearn/var/Func.h>
#include <plearn/opt/Optimizer.h>
#include <plearn_learners/distributions/PDistribution.h>
#include <plearn/ker/DistanceKernel.h>

namespace PLearn {
using namespace std;

class GaussianContinuumDistribution: public UnconditionalDistribution
{

private:

    typedef UnconditionalDistribution inherited;

protected:
    // NON-OPTION FIELDS
    int n;
    Func cost_of_one_example;
    //Func verify_gradient_func;
    Var x, noise_var; // input vector
    Var b, W, c, V, muV, smV, smb, snV, snb; // explicit view of the parameters (also in parameters field).
    //Var W_src, c_src, V_src, muV_src, smV_src, smb_src, snV_src, snb_src;
    //VarArray mu_neighbors, sm_neighbors, sn_neighbors, hidden_neighbors, input_neighbors, index_neighbors, tangent_plane_neighbors;
    Var tangent_targets, tangent_targets_and_point; // target for the tangent vectors for one example
    Var tangent_plane;
    Var mu, sm, sn, mu_noisy; // parameters of the conditional models
    Var p_x, p_target, p_neighbors, p_neighbors_and_point, target_index, neigbor_indexes;
    Var sum_nll;
    Var min_sig, min_d;
    Var fixed_min_sig, fixed_min_d;

    PP<PDistribution> dist;

    // Random walk fields
    Array<VMat> ith_step_generated_set;

    // p(x) computation fields
    VMat train_and_generated_set;
    TMat<int> train_nearest_neighbors;
    TVec< Mat > Bs, Fs;
    Mat mus;
    Vec sms;
    Vec sns;

    Mat Ut_svd, V_svd; // for SVD computation
    Vec S_svd;         // idem
    mutable Vec z, zm, zn, x_minus_neighbor, w;
    mutable Vec t_row, neighbor_row;
    mutable Vec t_dist;
    mutable Mat distances;

    mutable DistanceKernel dk;

    real best_validation_cost;

    // *********************
    // * protected options *
    // *********************

    // ### declare protected option fields (such as learnt parameters) here
    VarArray parameters;

public:

    mutable TVec<int> t_nn;
    mutable Vec log_gauss;
    mutable Mat w_mat;
    VMat reference_set;

    // ************************
    // * public build options *
    // ************************

    // ### declare public option fields (such as build options) here

    real weight_mu_and_tangent;
    bool include_current_point;
    real random_walk_step_prop;
    bool use_noise;
    bool use_noise_direction;
    real noise;
    string noise_type;
    int n_random_walk_step;
    int n_random_walk_per_point;
    bool save_image_mat;
    bool walk_on_noise;
    real min_sigma;
    real min_diff;
    real fixed_min_sigma;
    real fixed_min_diff;
    real min_p_x;
    bool sm_bigger_than_sn;
    int n_neighbors;          // number of neighbors used for gradient descent
    int n_neighbors_density;  // number of neighbors for the p(x) density estimation
    int mu_n_neighbors;       // number of neighbors to learn the mus
    int n_dim;                // number of reduced dimensions (number of tangent vectors to compute)
    real sigma_grad_scale_factor;
    int update_parameters_every_n_epochs;
    string variances_transfer_function; // "square", "exp" or "softplus"
    PP<Optimizer> optimizer; // to estimate the function that predicts local tangent vectors given the input
    Var embedding;
    Func output_embedding;
    Func output_f;
    Func output_f_all;
    Func predictor;          // predicts everything about the gaussian
    Func projection_error_f; // map output to projection error
    Func noisy_data;

    // manual construction of the tangent_predictor
    string architecture_type; // "neural_network" or "linear" or "" or "embedding_neural_nework" or "embedding_quadratic"
    string output_type;       // "tangent_plane", "embedding", or "tangent_plane+embedding".
    int n_hidden_units;

    int batch_size;

    real norm_penalization; // penalizes sum_i (||f_i||^2-1)^2
    real svd_threshold;

    // ****************
    // * Constructors *
    // ****************

    GaussianContinuumDistribution();


    // ********************
    // * PLearner methods *
    // ********************

private:

    void build_();

    void compute_train_and_validation_costs();

    void make_random_walk();

    void update_reference_set_parameters();

    void knn(const VMat& vm, const Vec& x, const int& k, TVec<int>& neighbors, bool sortk) const;

protected:

    static void declareOptions(OptionList& ol);

    virtual void forget();
    virtual void initializeParams();

public:

    // ************************
    // **** Object methods ****
    // ************************

    virtual void build();

    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);

    // Declares other standard object methods.
    // If your class is not instantiatable (it has pure virtual methods)
    // you should replace this by PLEARN_DECLARE_ABSTRACT_OBJECT_METHODS.
    PLEARN_DECLARE_OBJECT(GaussianContinuumDistribution);

    // *******************************
    // **** PDistribution methods ****
    // *******************************

    virtual real log_density(const Vec& x) const;

    real log_density(int i);

    virtual void train();

    /* Not implemented for now
    virtual void expectation(Vec& mu) const;

    virtual void variance(Mat& cov) const;

    virtual void generate(Vec& y) const;

    virtual void resetGenerator(long g_seed) const;
    */

    virtual void computeOutput(const Vec& input, Vec& output) const;

    virtual int outputsize() const;

    /* Not needed anymore
    virtual int outputsize() const;
    */


    // *** SUBCLASS WRITING: ***
    // While in general not necessary, in case of particular needs
    // (efficiency concerns for ex) you may also want to overload
    // some of the following methods:
    //    virtual void computeOutputAndCosts(const Vec& input, const Vec& target, Vec& output, Vec& costs) const;
    //    virtual void computeCostsOnly(const Vec& input, const Vec& target, Vec& costs) const;
    //    virtual void test(VMat testset, PP<VecStatsCollector> test_stats, VMat testoutputs=0, VMat testcosts=0) const;
    //    virtual int nTestCosts() const;
    //    virtual int nTrainCosts() const;

    Mat getEigenvectors(int j) const;

    Vec getTrainPoint(int j) const;

};

// Declares a few other classes and functions related to this class.
DECLARE_OBJECT_PTR(GaussianContinuumDistribution);

} // end of namespace PLearn

#endif


/*
  Local Variables:
    mode:c++
    c-basic-offset:4
    c-file-style:"stroustrup"
    c-file-offsets:((innamespace . 0)(inline-open . 0))
    indent-tabs-mode:nil
    fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
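Below is a minimal usage sketch, not part of the original header. It relies on the standard PLearner interface inherited through UnconditionalDistribution (setTrainingSet(), train(), the nstages option) and assumes a GradientOptimizer is available under plearn/opt/; the option values shown are purely illustrative, and only public options declared above are touched.

// Hypothetical usage sketch (see assumptions above); not from the original file.
#include "GaussianContinuumDistribution.h"
#include <plearn/opt/GradientOptimizer.h>   // assumed location of a concrete Optimizer

using namespace PLearn;

void example_usage()
{
    Mat data(200, 3);                // training points, one per row
    // ... fill 'data' with samples lying near a low-dimensional manifold ...

    PP<GaussianContinuumDistribution> model = new GaussianContinuumDistribution();
    model->n_dim = 2;                            // number of tangent directions to learn
    model->n_neighbors = 10;                     // neighbors used for gradient descent
    model->n_neighbors_density = 20;             // neighbors used for the p(x) estimate
    model->architecture_type = "neural_network";
    model->n_hidden_units = 30;
    model->optimizer = new GradientOptimizer();  // an Optimizer must be supplied
    model->nstages = 10;                         // PLearner option: number of training stages
    model->build();

    model->setTrainingSet(VMat(data));           // standard PLearner call
    model->train();

    Vec x(3);                                    // a query point
    real log_p = model->log_density(x);          // log p(x) under the learned model
}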