// -*- C++ -*-
// NnlmOutputLayer.h
//
// Copyright (C) 2006 Pierre-Antoine Manzagol
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pierre-Antoine Manzagol

#ifndef NnlmOutputLayer_INC
#define NnlmOutputLayer_INC

#include <plearn/base/Object.h>
#include <plearn/math/TMat_maths.h>
#include <plearn_learners/online/OnlineLearningModule.h>

namespace PLearn {

class NnlmOutputLayer : public OnlineLearningModule
{
    typedef OnlineLearningModule inherited;

public:
    //##### Public Build Options ############################################

    int target_cardinality;

    int context_cardinality;

    real sigma2min;

    real dl_start_learning_rate;
    real dl_decrease_constant;

    // How much the first example of this word in the trainset should be worth
    // in the sum relative to the last: \f$ \mu' = (1-\alpha) \mu + \alpha r \f$.
    // Determine \f$ \alpha \f$ from \f$ (1-\alpha)^n = \f$ el_start_discount_factor.
    real el_start_discount_factor;
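
    // Illustrative note (not in the original header): following the relation
    // stated above, if a word occurs n times in the training set and each
    // occurrence applies \f$ \mu' = (1-\alpha) \mu + \alpha r \f$, the first
    // occurrence ends up discounted by roughly \f$ (1-\alpha)^n \f$ relative
    // to the last.  Solving \f$ (1-\alpha)^n = \f$ el_start_discount_factor gives
    //     alpha = 1.0 - pow( el_start_discount_factor, 1.0/n );
    // e.g. el_start_discount_factor = 0.5 with n = 100 occurrences yields
    // alpha ~= 0.0069.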

public:
    //##### Public Member Functions #########################################

    NnlmOutputLayer();

    void resetParameters();

    void resetAllClassVars();
    void updateClassVars(const int the_target, const Vec& the_input); // for "target"
    void applyAllClassVars();

    void computeEmpiricalLearningRateParameters();

    void setTarget(int the_target) const;
    void setContext(int the_context) const;
    void setCost(int the_cost);
    void setLearning(int the_learning);

    virtual void fprop(const Vec& input, Vec& output) const;

    /*virtual void bpropUpdate(const Vec& input, const Vec& output,
                               const Vec& output_gradient);*/

    virtual void bpropUpdate(const Vec& input, const Vec& output,
                             Vec& input_gradient,
                             const Vec& output_gradient);

    virtual void forget();

    // virtual void finalize();

    void compute_nl_p_rt(const Vec& input, Vec& output) const;

    void compute_approx_nl_p_t_r(const Vec& input, Vec& output) const;

    void compute_nl_p_t_r(const Vec& input, Vec& output) const;

    void getBestCandidates(const Vec& input, Vec& candidate_tags, Vec& probabilities) const;

    //{
    void computeNonDiscriminantGradient() const;
    void computeApproxDiscriminantGradient() const;
    void computeDiscriminantGradient() const;
    void addCandidateContribution( int c ) const;
    //}

    //{
    void applyMuAndSigmaEmpiricalUpdate(const Vec& input) const;

    void applyMuGradient() const;
    void applyMuTargetGradient() const;
    void applyMuCandidateGradient(int c) const;

    void applySigmaGradient() const;
    void applySigmaTargetGradient() const;
    void applySigmaCandidateGradient(int c) const;
    //}
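
    // Sketch (an assumption, not from the original header): with the per-class
    // sufficient statistics declared below (sumI, sumR, sumR2), the empirical
    // class variables that applyAllClassVars() and
    // applyMuAndSigmaEmpiricalUpdate() would maintain are, element-wise,
    //     mu(i)     = sumR(i)  / sumI[i];
    //     sigma2(i) = sumR2(i) / sumI[i] - mu(i)*mu(i);
    // with each variance presumably floored at sigma2min to avoid degenerate
    // Gaussians.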

    //##### PLearn::Object Protocol #########################################

    // Declares other standard object methods.
    // ### If your class is not instantiatable (it has pure virtual methods)
    // ### you should replace this by PLEARN_DECLARE_ABSTRACT_OBJECT
    PLEARN_DECLARE_OBJECT(NnlmOutputLayer);

    // Simply calls inherited::build() then build_()
    virtual void build();

    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);


protected:
    //##### Protected Member Functions ######################################

    static void declareOptions(OptionList& ol);

private:
    //##### Private Member Functions ########################################

    void build_();

private:
    //##### Private Data Members ############################################


public:
    //##### Public NOT Options ##############################################

    int step_number;

    real umc;

    Vec pi;

    Mat mu;
    Mat sigma2;

    Vec global_mu;
    Vec global_sigma2;

    mutable int s_sumI;      // sum_t 1
    TVec<int> sumI;          // sumI(i)  -> sum_t 1_{c==i}

    Mat sumR;                // sumR(i)  -> sum_t r_t 1_{c==i}
    Mat sumR2;               // sumR2(i) -> sum_t r_t^2 1_{c==i}

    Vec global_sumR;
    Vec global_sumR2;

    // TODO THIS COULD BE A LEARNT OPTION
    TVec<int> shared_candidates;    // frequent (ie paying) words
    TVec< TVec<int> > candidates;   // context specific candidates

    // for learning umc
    //mutable real log_p_g_r;
    //mutable real sum_log_p_g_r;

    //##### Don't need to be saved ##########################################

    enum{COST_DISCR=0, COST_APPROX_DISCR=1, COST_NON_DISCR=2}; // ### Watchout... also defined in NnlmOnlineLearner.
    enum{LEARNING_DISCRIMINANT=0, LEARNING_EMPIRICAL=1};       // Granted, this is not good.

    // Specifies learning procedure
    int learning;

    //{
    int cost;

    mutable int target;
    mutable int the_real_target;
    mutable int context;
    //}

    //##### Intermediates ###################################################

    mutable real s;
    mutable real g_exponent;
    mutable real log_g_det_covariance;
    mutable real log_g_normalization;

    mutable Vec vec_log_p_rg_t;
    mutable Vec vec_log_p_r_t;
    mutable Vec vec_log_p_rt;
    mutable real log_sum_p_ru;

    // holds \f$ \Sigma^{-1} (r-\mu) \f$
    mutable Mat beta;

    // holds pi[] * p_rg_t * \f$ \Sigma^{-1} (r-\mu) \f$
    //mutable Mat gamma;

    mutable Vec nd_gradient;
    mutable Vec ad_gradient;
    mutable Vec fd_gradient;

    mutable Vec bill;
    mutable Vec bob;

    mutable Vec gradient_log_tmp;
    mutable Vec gradient_log_tmp_pos;
    mutable Vec gradient_log_tmp_neg;

    Vec el_start_learning_rate;
    Vec el_decrease_constant;
    Vec el_last_update;

    mutable real el_dr;
    mutable real dl_lr;

    bool is_learning;

};
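
// Usage sketch (illustrative only, inferred from the declarations above; the
// exact sequence used by NnlmOnlineLearner may differ).  input_size and
// output_size are inherited from OnlineLearningModule:
//
//     NnlmOutputLayer out;
//     out.build();
//     out.setLearning( NnlmOutputLayer::LEARNING_DISCRIMINANT );
//     out.setCost( NnlmOutputLayer::COST_DISCR );
//
//     int the_target = 0, the_context = 0;   // hypothetical word/context indices
//     Vec r( out.input_size );               // representation from the previous module
//     Vec cost( out.output_size );
//     Vec r_gradient( out.input_size ), cost_gradient( out.output_size );
//
//     out.setContext( the_context );
//     out.setTarget( the_target );
//     out.fprop( r, cost );                  // evaluates the selected cost
//     out.bpropUpdate( r, cost, r_gradient, cost_gradient );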

// Declares a few other classes and functions related to this class
DECLARE_OBJECT_PTR(NnlmOutputLayer);

} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :