// -*- C++ -*-

// GaussianProcessRegressor.h
//
// Copyright (C) 2006--2009 Nicolas Chapados
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: .pyskeleton_header 544 2003-09-01 00:05:31Z plearner $
 ******************************************************* */

// Authors: Nicolas Chapados

#ifndef GaussianProcessRegressor_INC
#define GaussianProcessRegressor_INC

// From PLearn
#include <plearn/ker/Kernel.h>
#include <plearn_learners/generic/PLearner.h>

namespace PLearn {

class GaussianProcessNLLVariable;
class Optimizer;

class GaussianProcessRegressor : public PLearner
{
    typedef PLearner inherited;

public:
    //#####  Public Build Options  ##########################################

    Ker m_kernel;

    real m_weight_decay;

    bool m_include_bias;

    bool m_compute_confidence;

    real m_confidence_epsilon;

    TVec< pair<string,string> > m_hyperparameters;

    pair<string,string> m_ARD_hyperprefix_initval;

    PP<Optimizer> m_optimizer;

    bool m_save_gram_matrix;

    string m_solution_algorithm;

    TVec<int> m_active_set_indices;


public:
    //#####  Public Member Functions  #######################################

    GaussianProcessRegressor();


    //#####  PLearner Member Functions  #####################################

    virtual void setTrainingSet(VMat training_set, bool call_forget=true);

    virtual int outputsize() const;

    virtual void forget();

    virtual void train();

    virtual void computeOutput(const Vec& input, Vec& output) const;

    virtual void computeCostsFromOutputs(const Vec& input, const Vec& output,
                                         const Vec& target, Vec& costs) const;

    virtual
    bool computeConfidenceFromOutput(const Vec& input, const Vec& output,
                                     real probability,
                                     TVec< pair<real,real> >& intervals) const;

    virtual void computeOutputCovMat(const Mat& inputs, Mat& outputs,
                                     TVec<Mat>& covariance_matrices) const;

    virtual TVec<std::string> getTestCostNames() const;

    virtual TVec<std::string> getTrainCostNames() const;


    //#####  PLearn::Object Protocol  #######################################

    // Declares other standard object methods.
    PLEARN_DECLARE_OBJECT(GaussianProcessRegressor);

    // Simply calls inherited::build() then build_()
    virtual void build();

    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);

protected:
    static void declareOptions(OptionList& ol);

    void computeOutputAux(const Vec& input, Vec& output,
                          Vec& kernel_evaluations) const;

    PP<GaussianProcessNLLVariable> hyperOptimize(
        const Mat& inputs, const Mat& targets, VarArray& hyperparam_vars);

    void trainProjectedProcess(const Mat& all_training_inputs,
                               const Mat& sub_training_inputs,
                               const Mat& all_training_targets);

protected:
    //#####  Protected Options  #############################################

    Mat m_alpha;

    Mat m_gram_inverse;

    Mat m_subgram_inverse;

    Vec m_target_mean;

    Mat m_training_inputs;

    mutable Vec m_kernel_evaluations;

    mutable Vec m_gram_inverse_product;

    mutable TVec< pair<real,real> > m_intervals;

    mutable Mat m_gram_traintest_inputs;

    mutable Mat m_gram_inv_traintest_product;

    mutable Mat m_sigma_reductor;

    enum {
        AlgoExact,
        AlgoProjectedProcess
    } m_algorithm_enum;

private:
    void build_();
};

// Declares a few other classes and functions related to this class
DECLARE_OBJECT_PTR(GaussianProcessRegressor);

} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
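Note on the exact algorithm (AlgoExact): the protected members above match the
standard Gaussian-process regression equations; the following is an
interpretation inferred from the member names, not text taken from the
implementation file. With K the training Gram matrix computed by m_kernel,
\lambda the m_weight_decay regularizer, and \bar{y} the m_target_mean, the
fitted coefficients and predictions would be

    \alpha = (K + \lambda I)^{-1} (Y - \bar{y})
    \hat{y}(x) = \bar{y} + k(x)^\top \alpha
    \sigma^2(x) = k(x, x) - k(x)^\top (K + \lambda I)^{-1} k(x)

where k(x) is the vector of kernel evaluations between x and the stored
m_training_inputs (cached in m_kernel_evaluations), \alpha corresponds to
m_alpha, and (K + \lambda I)^{-1} to m_gram_inverse used for the confidence
and covariance computations.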
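Note on the projected-process variant (AlgoProjectedProcess): when
m_solution_algorithm selects it, the rows listed in m_active_set_indices act
as the active (inducing) subset. Under the usual projected-process
approximation (again an inference from the member names, hedged accordingly),
with K_{mm} the Gram matrix of the active subset and K_{mn} the cross-Gram
matrix between the active subset and all training inputs, the coefficients
would take the form

    \alpha = (\lambda K_{mm} + K_{mn} K_{mn}^\top)^{-1} K_{mn} (Y - \bar{y})

and m_subgram_inverse would plausibly cache K_{mm}^{-1} for the predictive
variance; trainProjectedProcess above receives exactly the full and
sub-sampled input matrices this computation needs.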
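A minimal usage sketch exercising only the public interface declared in this
header. The include paths, the kernel class name, and all option values are
illustrative assumptions (substitute whatever Kernel subclass and settings
your build provides); `trainset` is assumed to be a VMat whose input and
target sizes are already defined.

#include <plearn_learners/regressors/GaussianProcessRegressor.h>  // path assumed
#include <plearn/ker/SquaredExponentialARDKernel.h>                // kernel assumed

using namespace PLearn;

// Sketch only: fit a GP regressor and query one test point.
void gp_regression_example(VMat trainset, const Vec& test_input)
{
    PP<GaussianProcessRegressor> gp = new GaussianProcessRegressor();

    // Public build options declared above; values are illustrative.
    gp->m_kernel             = new SquaredExponentialARDKernel(); // assumed class
    gp->m_weight_decay       = 1e-2;      // observation-noise regularizer
    gp->m_compute_confidence = true;
    gp->build();                          // finalize option changes

    gp->setTrainingSet(trainset);         // provides input/target sizes
    gp->train();                          // fills m_alpha and related caches

    Vec output(gp->outputsize());
    gp->computeOutput(test_input, output);            // posterior mean

    TVec< pair<real,real> > intervals;
    gp->computeConfidenceFromOutput(test_input, output,
                                    0.95, intervals); // ~95% intervals
}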