// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: Kernel.h 10083 2009-04-04 20:33:18Z chapados $
 * This file is part of the PLearn library.
 ******************************************************* */

#ifndef Kernel_INC
#define Kernel_INC

#include <plearn/base/Object.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/vmat/VMat.h>

namespace PLearn {
using namespace std;

class Kernel: public Object
{

private:

    typedef Object inherited;

    mutable Vec evaluate_xi, evaluate_xj, k_xi_x;

    mutable bool lock_xi, lock_xj, lock_k_xi_x;

protected:

    VMat data;
    int data_inputsize;
    mutable Mat gram_matrix;
    mutable TVec<Mat> sparse_gram_matrix;
    mutable bool gram_matrix_is_cached;
    mutable bool sparse_gram_matrix_is_cached;
    int n_examples;

    static void declareOptions(OptionList& ol);

    static void declareMethods(RemoteMethodMap& rmm);

public:

    bool cache_gram_matrix;
    bool is_symmetric;
    int report_progress;
    VMat specify_dataset;

    Kernel(bool is__symmetric = true, bool call_build_ = false);

    PLEARN_DECLARE_ABSTRACT_OBJECT(Kernel);

    virtual real evaluate(const Vec& x1, const Vec& x2) const = 0;

    virtual void train(VMat data);

    virtual void setDataForKernelMatrix(VMat the_data);

    virtual void addDataForKernelMatrix(const Vec& newRow);

    int dataInputsize() const
    {
        return data_inputsize;
    }

    int nExamples() const
    {
        return n_examples;
    }

    virtual real evaluate_i_j(int i, int j) const;

    virtual real evaluate_i_x(int i, const Vec& x, real squared_norm_of_x=-1) const;

    virtual real evaluate_x_i(const Vec& x, int i, real squared_norm_of_x=-1) const;

    virtual real evaluate_i_x_again(int i, const Vec& x, real squared_norm_of_x=-1, bool first_time = false) const;
    virtual real evaluate_x_i_again(const Vec& x, int i, real squared_norm_of_x=-1, bool first_time = false) const;

    virtual void computeGramMatrix(Mat K) const;
    virtual Mat returnComputedGramMatrix() const;

    virtual void computePartialGramMatrix(const TVec<int>& subset_indices,
                                          Mat K) const;

    virtual void computeTestGramMatrix(Mat test_elements,
                                       Mat K, Vec self_cov) const;

    virtual void computeSparseGramMatrix(TVec<Mat> K) const;

    virtual void computeGramMatrixDerivative(Mat& KD, const string& kernel_param,
                                             real epsilon=1e-6) const;

    virtual void setParameters(Vec paramvec);
    virtual Vec getParameters() const;

    virtual void evaluate_all_i_x(const Vec& x, const Vec& k_xi_x,
                                  real squared_norm_of_x=-1, int istart = 0) const;

    virtual void evaluate_all_x_i(const Vec& x, const Vec& k_x_xi,
                                  real squared_norm_of_x=-1, int istart = 0) const;

    void apply(VMat m1, VMat m2, Mat& result) const;
    Mat apply(VMat m1, VMat m2) const;
    void apply(VMat m, const Vec& x, Vec& result) const;
    void apply(Vec x, VMat m, Vec& result) const;

    inline real operator()(const Vec& x1, const Vec& x2) const
    {
        return evaluate(x1,x2);
    }

    bool hasData();

    inline VMat getData() {return this->data;}

    bool isInData(const Vec& x, int* i = 0) const;

    void computeNearestNeighbors(const Vec& x, Mat& k_xi_x_sorted, int knn) const;

    static TMat<int> computeKNNeighbourMatrixFromDistanceMatrix(const Mat& D, int knn, bool insure_self_first_neighbour=true, bool report_progress = false);

    static Mat computeNeighbourMatrixFromDistanceMatrix(const Mat& D, bool insure_self_first_neighbour=true, bool report_progress = false);

    Mat estimateHistograms(VMat d, real sameness_threshold, real minval, real maxval, int nbins) const;
    Mat estimateHistograms(Mat input_and_class, real minval, real maxval, int nbins) const;
    real test(VMat d, real threshold, real sameness_below_threshold, real sameness_above_threshold) const;
    virtual void build();

    virtual ~Kernel();

    virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies);

private:

    void build_();

};
DECLARE_OBJECT_PTR(Kernel);

class Ker: public PP<Kernel>
{
public:
    Ker() {}
    Ker(Kernel* v) :PP<Kernel>(v) {}
    Ker(const Ker& other) :PP<Kernel>(other) {}

    real operator()(const Vec& x1, const Vec& x2) const
    { return ptr->evaluate(x1,x2); }
};

DECLARE_OBJECT_PP(Ker, Kernel);

template <>
inline
void deepCopyField(Ker& field, CopiesMap& copies)
{
    if (field)
        field = static_cast<Kernel*>(field->deepCopy(copies));
}

// last column of data is supposed to be a classnum
// returns a matrix of (index1, index2, distance)
Mat findClosestPairsOfDifferentClass(int k, VMat data, Ker dist);

inline Array<Ker> operator&(const Ker& k1, const Ker& k2)
{ return Array<Ker>(k1,k2); }

/*!************
 * CostFunc *
 ************
 */

typedef Ker CostFunc;

/*!**********************************************************************
  FINANCIAL STUFF
 **********************************************************************
 */

typedef CostFunc ProfitFunc;


} // end of namespace PLearn

#endif


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
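
// ---------------------------------------------------------------------------
// Usage sketch (illustration only, appended to this listing; it is NOT part of
// the original Kernel.h and is kept inside #if 0 so it is never compiled).
// It shows the calling pattern implied by the declarations above: attach a
// dataset with setDataForKernelMatrix(), fill a Gram matrix with
// computeGramMatrix(), and evaluate the kernel directly through
// Ker::operator(). The function name example_gram_matrix, the include path,
// and the construction of a VMat from a plain Mat are assumptions made for the
// sake of the example; any concrete Kernel subclass may be passed in as `k`.
// ---------------------------------------------------------------------------
#if 0   // illustrative sketch only
#include <plearn/ker/Kernel.h>

namespace PLearn {

Mat example_gram_matrix(Ker k, const Mat& inputs)
{
    VMat data(inputs);                   // view the raw matrix as a VMat (assumed conversion)
    k->setDataForKernelMatrix(data);     // make the n examples available to the kernel
    Mat K(k->nExamples(), k->nExamples());
    k->computeGramMatrix(K);             // fill K with pairwise kernel evaluations
    real k01 = k(inputs(0), inputs(1));  // direct evaluation via Ker::operator()
    (void) k01;
    return K;
}

} // end of namespace PLearn
#endif  // usage sketch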