PLearn 0.1: GaussianProcessRegressor.h
// -*- C++ -*-

// GaussianProcessRegressor.h
//
// Copyright (C) 2003 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: GaussianProcessRegressor.h 9418 2008-09-02 15:33:46Z nouiz $
 ******************************************************* */

#ifndef GaussianProcessRegressor_INC
#define GaussianProcessRegressor_INC

#include "PConditionalDistribution.h"
#include <plearn/ker/Kernel.h>

namespace PLearn {
using namespace std;

/*! Gaussian Process regression with an arbitrary PLearn Kernel as the prior
 *  covariance on functions.  Supports several output dimensions (with a
 *  separate output noise standard deviation for each), optional normalizations
 *  of the Gram matrix, and an optional truncation of its eigen-decomposition
 *  to the max_nb_evectors principal eigenvectors.  The predictive distribution
 *  for a test input is Gaussian; its mean and (co)variance are available
 *  through the PConditionalDistribution interface (expectation(), variance()).
 */
class GaussianProcessRegressor: public PConditionalDistribution
{

public:
    typedef PConditionalDistribution inherited;

    // Build options

    PP<Kernel> kernel;   // kernel = prior covariance on functions
    int n_outputs;       // dimension of the target variables
    Vec noise_sd;        // output noise standard deviation, for each output dimension
    string Gram_matrix_normalization; // normalization method to apply to the Gram matrix:
    // "none": no normalization
    // "centering_a_dot_product": this is the kernel PCA centering
    //     K_{ij} <-- K_{ij} - mean_i(K_ij) - mean_j(K_ij) + mean_{ij}(K_ij)
    // "centering_a_distance": this is the MDS transformation of squared distances to dot products
    //     K_{ij} <-- -0.5 * (K_{ij} - mean_i(K_ij) - mean_j(K_ij) + mean_{ij}(K_ij))
    // "divisive": this is the spectral clustering and Laplacian eigenmaps normalization
    //     K_{ij} <-- K_{ij} / sqrt(mean_i(K_ij) * mean_j(K_ij))
    int max_nb_evectors; // if -1, compute all eigenvectors; otherwise compute only that many principal eigenvectors


    // Temporary fields that don't need to be saved = NON-OPTIONS

    Mat alpha;           // each row i has the coefficients of K(x,x_j) in the regression for the i-th output
    mutable Vec Kxxi;    // holds K(x,x_i) for the current input x
    mutable real Kxx;    // holds K(x,x) for the current input x
    Mat K;               // non-sparse Gram matrix
    Mat eigenvectors;    // principal eigenvectors (in the rows!)
    Vec eigenvalues;     // and corresponding eigenvalues
    Vec meanK;           // meanK[j] = mean_i(K_{ij})
    real mean_allK;      // mean_{ij}(K_{ij})

public:

    GaussianProcessRegressor();
    virtual ~GaussianProcessRegressor();

    //! Transforms a shallow copy into a deep copy.
    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);

    //! Sets the conditioning input x (for which Kxxi and Kxx are computed).
    virtual void setInput(const Vec& input) const;

    //! Returns the log-density of the given target under the conditional
    //! (predictive) distribution at the current input.
    virtual double log_density(const Vec& x) const;

    //! Returns the expected value of the output given the current input.
    virtual Vec expectation() const;

    //! Fills expected_y with the expected value of the output given the current input.
    virtual void expectation(Vec expected_y) const;

    //! Returns the covariance of the output given the current input;
    //! the Vec overload fills only the diagonal variances.
    virtual Mat variance() const;
    virtual void variance(Vec diag_variances) const;

private:
    void build_();

public:
    virtual void build();

    virtual void forget();

    virtual int outputsize() const;

    //! Trains the model on the current training set (fills the Gram matrix K,
    //! its eigen-decomposition, and the regression coefficients alpha).
    virtual void train();

    virtual void computeOutput(const Vec& input, Vec& output) const;

    //! Computes the test costs from an already computed output
    //! (see getTestCostNames()).
    virtual void computeCostsFromOutputs(const Vec& input, const Vec& output,
                                         const Vec& target, Vec& costs) const;

    //! Computes both the output and the test costs for the given input/target pair.
    virtual void computeOutputAndCosts(const Vec& input, const Vec& target,
                                       Vec& output, Vec& costs) const;

    //! Computes only the test costs for the given input/target pair.
    virtual void computeCostsOnly(const Vec& input, const Vec& target, Vec& costs) const;

    //! Returns the names of the costs computed by computeCostsFromOutputs.
    virtual TVec<string> getTestCostNames() const;

    //! Returns the names of the costs computed by the train method.
    virtual TVec<string> getTrainCostNames() const;

    virtual int nTestCosts() const { return 2; }

    virtual int nTrainCosts() const { return 2; }

    //! Returns the index of the given cost name in the vector returned by getTestCostNames().
    int getTestCostIndex(const string& costname) const;

    //! Returns the index of the given cost name in the vector returned by getTrainCostNames().
    int getTrainCostIndex(const string& costname) const;

protected:
    static void declareOptions(OptionList& ol);

    // covariance = K + sigma^2 I
    // Multiply (K + sigma^2 I)^{-1} by vector v, put the result in Cinv_v.
    // TRICK USING PRINCIPAL E-VECTORS OF K
    // (sigma below denotes the noise term added on the diagonal of C):
    //   Let C = sum_{i=1}^m lambda_i v_i v_i' + sigma I
    //   with v_i orthonormal eigenvectors. Then it can also be written
    //     C = sum_{i=1}^m (lambda_i + sigma) v_i v_i' + sum_{i=m+1}^n sigma v_i v_i'
    //   whose inverse is simply
    //     inverse(C) = sum_{i=1}^m 1/(lambda_i + sigma) v_i v_i' + sum_{i=m+1}^n 1/sigma v_i v_i'
    //                = sum_{i=1}^m (1/(lambda_i + sigma) - 1/sigma) v_i v_i' + 1/sigma I
    // Set Cinv_v = inverse(C)*v, using the given sigma in C.
    void inverseCovTimesVec(real sigma, Vec v, Vec Cinv_v) const;

    // Return u'*inverse(C)*u, using the given sigma in C.
    real QFormInverse(real sigma2, Vec u) const;

    //! Training criterion: negative log-likelihood of the training data under the GP model.
    real BayesianCost();

public:
    PLEARN_DECLARE_OBJECT(GaussianProcessRegressor);

};

DECLARE_OBJECT_PTR(GaussianProcessRegressor);

} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
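The comment above inverseCovTimesVec describes how inverse(C), with C = K + sigma^2 I, can be applied to a vector using only the m principal eigenvectors of K. The sketch below illustrates that formula in self-contained C++; it uses std::vector instead of PLearn's Vec/Mat and a hypothetical function name, and is not the library's implementation.

```cpp
#include <cstddef>
#include <vector>

// Sketch only: computes inverse(C) * v, where C = K + sigma2 * I, given the top-m
// orthonormal eigenvectors of K (rows of `eigenvectors`, each of length n) and the
// corresponding eigenvalues, using
//   inverse(C) = sum_i (1/(lambda_i + sigma2) - 1/sigma2) v_i v_i' + (1/sigma2) I.
std::vector<double> inverse_cov_times_vec(
    double sigma2,                                         // noise variance added on the diagonal
    const std::vector<std::vector<double>>& eigenvectors,  // m rows of length n
    const std::vector<double>& eigenvalues,                // m principal eigenvalues of K
    const std::vector<double>& v)                          // vector to multiply
{
    const std::size_t n = v.size();
    const std::size_t m = eigenvalues.size();

    // Start with the (1/sigma2) * I term.
    std::vector<double> result(n);
    for (std::size_t j = 0; j < n; ++j)
        result[j] = v[j] / sigma2;

    // Add the low-rank correction along each principal eigen-direction.
    for (std::size_t i = 0; i < m; ++i) {
        double dot = 0.0;                                  // v_i' * v
        for (std::size_t j = 0; j < n; ++j)
            dot += eigenvectors[i][j] * v[j];
        const double coef = (1.0 / (eigenvalues[i] + sigma2) - 1.0 / sigma2) * dot;
        for (std::size_t j = 0; j < n; ++j)
            result[j] += coef * eigenvectors[i][j];
    }
    return result;
}
```

Directions outside the span of the computed eigenvectors are simply scaled by 1/sigma2, which is exactly what the initial (1/sigma2) I term provides, so truncating to m << n eigenvectors keeps each product at O(m n) instead of solving an n x n linear system.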