// -*- C++ -*-

// DivisiveNormalizationKernel.cc
//
// Copyright (C) 2004 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: DivisiveNormalizationKernel.cc 3994 2005-08-25 13:35:03Z chapados $
 ******************************************************* */

// Authors: Olivier Delalleau

#include "DivisiveNormalizationKernel.h"

namespace PLearn {
using namespace std;

/////////////////////////////////
// DivisiveNormalizationKernel //
/////////////////////////////////
DivisiveNormalizationKernel::DivisiveNormalizationKernel()
    : data_will_change(false),
      remove_bias(false)
{}

DivisiveNormalizationKernel::DivisiveNormalizationKernel(Ker the_source, bool the_remove_bias)
    : data_will_change(false),
      remove_bias(the_remove_bias)
{
    source_kernel = the_source;
    build();
}

PLEARN_IMPLEMENT_OBJECT(DivisiveNormalizationKernel,
                        "Divisive normalization of an underlying kernel.",
                        "From a positive kernel K, defines a new kernel K' such that:\n"
                        "  K'(x,y) = K(x,y) / sqrt(E[K(x,x_i)] . E[K(x_i,y)])\n"
                        "where the expectations are taken over the data set.\n"
                        "If the 'remove_bias' option is set, the expectations will not\n"
                        "take into account terms of the form K(x_i,x_i).\n"
    );
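
// Worked example of the formula above (illustrative numbers only): if
// K(x,y) = 4, the data average E[K(x,x_i)] is 2 and E[K(x_i,y)] is 8, then
//   K'(x,y) = 4 / sqrt(2 * 8) = 4 / 4 = 1.
// The normalization is well-defined only when both averages are strictly
// positive, which holds whenever K is a strictly positive kernel.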

////////////////////
// declareOptions //
////////////////////
void DivisiveNormalizationKernel::declareOptions(OptionList& ol)
{
    // Build options.

    declareOption(ol, "data_will_change", &DivisiveNormalizationKernel::data_will_change, OptionBase::buildoption,
                  "If set to 1, the Gram matrix will always be recomputed, even if it\n"
                  "is not certain that the data has changed.");

    declareOption(ol, "remove_bias", &DivisiveNormalizationKernel::remove_bias, OptionBase::buildoption,
                  "If set to 1, the bias induced by the terms K(x_i,x_i) will be removed.\n");

    // Learnt options.

    declareOption(ol, "average_col", &DivisiveNormalizationKernel::average_col, OptionBase::learntoption,
                  "The average of the underlying kernel over each column of the Gram matrix.");

    declareOption(ol, "average_row", &DivisiveNormalizationKernel::average_row, OptionBase::learntoption,
                  "The average of the underlying kernel over each row of the Gram matrix.");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);
}
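
// For reference, a schematic (hypothetical) PLearn-script instantiation
// using these build options might look as follows; 'source_kernel' is
// declared in the parent class (this file only sets it), and GaussianKernel
// is just one possible choice of underlying kernel:
//
//     DivisiveNormalizationKernel(
//         source_kernel = GaussianKernel(sigma = 1.0);
//         remove_bias = 1;
//     )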

///////////
// build //
///////////
void DivisiveNormalizationKernel::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void DivisiveNormalizationKernel::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning" options have been modified.
    // ### You should assume that the parent class' build_() has already been called.
}

////////////////////
// computeAverage //
////////////////////
real DivisiveNormalizationKernel::computeAverage(const Vec& x, bool on_row, real squared_norm_of_x) const {
    all_k_x.resize(n_examples);
    if (is_symmetric || !on_row) {
        source_kernel->evaluate_all_i_x(x, all_k_x, squared_norm_of_x);
    } else {
        source_kernel->evaluate_all_x_i(x, all_k_x, squared_norm_of_x);
    }
    return sum(all_k_x) / real(n_examples);
}
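
// Note: computeAverage returns (1/n) sum_i K(x, x_i) when 'on_row' is true,
// and (1/n) sum_i K(x_i, x) otherwise; for a symmetric source kernel the two
// coincide, hence the single evaluate_all_i_x path. Unlike the precomputed
// averages in setDataForKernelMatrix below, this out-of-sample average
// always divides by n_examples and ignores 'remove_bias', since x is
// assumed not to belong to the data set.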

///////////////////////
// computeGramMatrix //
///////////////////////
void DivisiveNormalizationKernel::computeGramMatrix(Mat K) const {
    // Uses the default Kernel implementation.
    Kernel::computeGramMatrix(K);
}

//////////////
// evaluate //
//////////////
real DivisiveNormalizationKernel::evaluate(const Vec& x1, const Vec& x2) const {
    real avg_1 = computeAverage(x1, true);
    real avg_2 = computeAverage(x2, false);
    return source_kernel->evaluate(x1, x2) / sqrt(avg_1 * avg_2);
}
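
// A minimal usage sketch (illustrative only; 'dataset', 'x1' and 'x2' are
// hypothetical, and GaussianKernel is just one possible source kernel):
//
//     Ker source = new GaussianKernel();
//     Ker k = new DivisiveNormalizationKernel(source, false);
//     k->setDataForKernelMatrix(dataset); // precomputes row/column averages
//     real v = k->evaluate(x1, x2);       // normalized kernel value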

//////////////////
// evaluate_i_j //
//////////////////
real DivisiveNormalizationKernel::evaluate_i_j(int i, int j) const {
    return source_kernel->evaluate_i_j(i,j) / sqrt(average_row[i] * average_col[j]);
}

//////////////////
// evaluate_i_x //
//////////////////
real DivisiveNormalizationKernel::evaluate_i_x(int i, const Vec& x, real squared_norm_of_x) const {
    return source_kernel->evaluate_i_x(i, x, squared_norm_of_x)
        / sqrt(average_row[i] * computeAverage(x, false, squared_norm_of_x));
}

////////////////////////
// evaluate_i_x_again //
////////////////////////
real DivisiveNormalizationKernel::evaluate_i_x_again(int i, const Vec& x, real squared_norm_of_x, bool first_time) const {
    if (first_time) {
        avg_evaluate_i_x_again = computeAverage(x, false, squared_norm_of_x);
    }
    return source_kernel->evaluate_i_x_again(i, x, squared_norm_of_x, first_time)
        / sqrt(average_row[i] * avg_evaluate_i_x_again);
}

//////////////////
// evaluate_x_i //
//////////////////
real DivisiveNormalizationKernel::evaluate_x_i(const Vec& x, int i, real squared_norm_of_x) const {
    return source_kernel->evaluate_x_i(x, i, squared_norm_of_x)
        / sqrt(average_col[i] * computeAverage(x, true, squared_norm_of_x));
}

////////////////////////
// evaluate_x_i_again //
////////////////////////
real DivisiveNormalizationKernel::evaluate_x_i_again(const Vec& x, int i, real squared_norm_of_x, bool first_time) const {
    if (first_time) {
        avg_evaluate_x_i_again = computeAverage(x, true, squared_norm_of_x);
    }
    return source_kernel->evaluate_x_i_again(x, i, squared_norm_of_x, first_time)
        / sqrt(average_col[i] * avg_evaluate_x_i_again);
}
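
// The '_again' variants implement a simple caching pattern: when a fixed x
// is evaluated against many data points, the data-dependent average of x is
// computed only on the first call and reused afterwards. A sketch of the
// intended calling convention ('k', 'x' and 'sq_norm' are hypothetical):
//
//     real v0 = k->evaluate_x_i_again(x, 0, sq_norm, true);  // computes avg
//     for (int i = 1; i < n; i++)
//         k->evaluate_x_i_again(x, i, sq_norm, false);       // reuses it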

//////////////////////////////////
// makeDeepCopyFromShallowCopy //
//////////////////////////////////
void DivisiveNormalizationKernel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### Remove this line when you have fully implemented this method.
    PLERROR("DivisiveNormalizationKernel::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}

////////////////////////////
// setDataForKernelMatrix //
////////////////////////////
void DivisiveNormalizationKernel::setDataForKernelMatrix(VMat the_data) {
    bool there_was_data_and_it_changed = data && !(data->looksTheSameAs(the_data));
    // Set the data for this kernel as well as for the underlying kernel.
    inherited::setDataForKernelMatrix(the_data);
    // Check whether we need to recompute the Gram matrix and its averages.
    int n = the_data->length();
    if (   data_will_change
           || average_row.length() != n
           || there_was_data_and_it_changed) {
        // Compute the underlying Gram matrix.
        Mat gram(n, n);
        source_kernel->computeGramMatrix(gram);
        // Compute the row (and column) averages.
        average_row.resize(n);
        average_row.fill(0);
        if (is_symmetric) {
            // For a symmetric kernel, row and column averages coincide:
            // 'average_col' shares the storage of 'average_row'.
            average_col = average_row;
        } else {
            average_col.resize(n);
            average_col.fill(0);
        }
        real k_x_x;
        for (int i = 0; i < n; i++) {
            if (is_symmetric) {
                real v;
                k_x_x = gram(i,i);
                if (!remove_bias) {
                    average_row[i] += k_x_x;
                }
                // Accumulate each off-diagonal term into both rows it
                // contributes to, so the matrix is traversed only once.
                for (int j = i + 1; j < n; j++) {
                    v = gram(i,j);
                    average_row[i] += v;
                    average_row[j] += v;
                }
            } else {
                for (int j = 0; j < n; j++) {
                    if (!remove_bias || j != i) {
                        average_row[i] += gram(i,j);
                        average_col[i] += gram(j,i);
                    }
                }
            }
        }
        real n_terms_in_sum;    // The number of terms summed in each average.
        if (remove_bias) {
            // The diagonal terms were not added.
            n_terms_in_sum = real(n - 1);
        } else {
            n_terms_in_sum = real(n);
        }
        average_row /= n_terms_in_sum;
        if (!is_symmetric) {
            average_col /= n_terms_in_sum;
        }
    }
}
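
// Note on cost: the recomputation above builds the full n-by-n Gram matrix
// of the source kernel, so setDataForKernelMatrix takes O(n^2) kernel
// evaluations in time and O(n^2) memory; the per-point averages it produces
// are what make evaluate_i_j and friends O(1) afterwards.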

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :