KMeansClustering.cc
// -*- C++ -*-

// KMeansClustering.cc
//
// Copyright (C) 2004 Jean-Sébastien Senécal
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: KMeansClustering.cc 6861 2007-04-09 19:04:15Z saintmlx $
 ******************************************************* */

// Authors: Jean-Sébastien Senécal

#include "KMeansClustering.h"
#include <plearn/math/random.h>

namespace PLearn {
using namespace std;

KMeansClustering::KMeansClustering()
    : inherited(), n_clusters_(0), clusters_()
{
}

PLEARN_IMPLEMENT_OBJECT(KMeansClustering,
                        "The K-Means algorithm.",
                        "This class implements the K-means algorithm. The outputs contain the "
                        "negative squared Euclidean distance to each centroid.");
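
// Note: train() below implements standard (Lloyd-style) k-means. Each stage
// assigns every training input to its nearest centroid and then moves each
// centroid to the weighted mean of the inputs assigned to it. The reported
// train cost ("squared_reconstruction_error") is the average over the training
// set of the squared distance between each input and its assigned centroid,
// i.e. J/N with J = sum_i ||x_i - c_{a(i)}||^2, where a(i) is the index of
// the centroid closest to x_i.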

void KMeansClustering::declareOptions(OptionList& ol)
{
    declareOption(ol, "n_clusters", &KMeansClustering::n_clusters_, OptionBase::buildoption,
                  "The number of clusters.");
    declareOption(ol, "clusters", &KMeansClustering::clusters_, OptionBase::learntoption,
                  "The learned centroids.");
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void KMeansClustering::build_()
{
}

void KMeansClustering::build()
{
    inherited::build();
    build_();
}


void KMeansClustering::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(clusters_, copies);
}


int KMeansClustering::outputsize() const
{
    return n_clusters_;
}

void KMeansClustering::forget()
{
    if (n_clusters_ <= 0)
        PLERROR("In KMeansClustering::forget(): number of clusters (%d) should be > 0", n_clusters_);

    static Vec input;  // static so we don't reallocate/deallocate memory each time...
    static Vec target; // (but be careful that static means shared!)

    input.resize(inputsize());    // the train_set's inputsize()
    target.resize(targetsize());  // the train_set's targetsize()
    clusters_.resize(n_clusters_, inputsize());

    real weight;

    manual_seed(seed_);

    // Build a vector of sample indexes used to initialize the cluster centers:
    // each centroid starts at a distinct, randomly chosen training input.
    Vec start_idx(n_clusters_, -1.0);
    int idx;
    for (int i=0; i<n_clusters_; i++)
    {
        bool uniq=false;
        while (!uniq)
        {
            uniq=true;
            idx = uniform_multinomial_sample(train_set.length());
            for (int j=0; j < n_clusters_ && start_idx[j] != -1.0; j++)
                if (start_idx[j] == idx)
                {
                    uniq=false;
                    break;
                }
        }
        start_idx[i] = idx;
        train_set->getExample(idx,input,target,weight);
        clusters_(i) << input;
    }
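
    // At this point clusters_ holds n_clusters_ distinct training inputs drawn
    // uniformly at random (Forgy-style initialization); train() refines them.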

    stage = 0;
}

void KMeansClustering::train()
{
    // The role of the train method is to bring the learner up to stage==nstages,
    // updating train_stats with training costs measured on-line in the process.
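    // Each stage corresponds to one full k-means iteration: every training
    // example is reassigned to its nearest centroid, and each centroid is then
    // recomputed as the weighted mean of the examples assigned to it. Training
    // also stops early (before nstages) once no example changes cluster between
    // two consecutive iterations.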

    PLASSERT( n_clusters_ > 0 );

    static Vec input;  // static so we don't reallocate/deallocate memory each time...
    static Vec target; // (but be careful that static means shared!)

    input.resize(inputsize());    // the train_set's inputsize()
    target.resize(targetsize());  // the train_set's targetsize()
    clusters_.resize(n_clusters_, inputsize());

    real weight;

    if(!train_stats)  // make a default stats collector, in case there's none
        train_stats = new VecStatsCollector();

    if(nstages<stage) // asking to revert to a previous stage!
        forget();  // reset the learner to stage=0

    Vec samples_per_cluster(n_clusters_);

    Mat new_clusters(n_clusters_,train_set->inputsize());
    TVec<int> cluster_idx(train_set.length());
    TVec<int> old_cluster_idx(train_set.length());
    Vec train_costs(nTrainCosts());
    clusters_.resize(n_clusters_,train_set->inputsize());

    bool stop = false;
    // Training loop.
    while(!stop && stage<nstages)
    {
        // Clear statistics of previous epoch.
        train_stats->forget();

        // Init.
        new_clusters.clear();
        samples_per_cluster.clear();
        old_cluster_idx << cluster_idx;
        train_costs.clear();

        // Reassign each point to its closest centroid.
        for (int i=0; i<train_set.length(); i++)
        {
            train_set->getExample(i,input,target,weight);
            real dist, bestdist=1E300;
            int bestclust=0;

            // Find the nearest centroid and its squared distance.
            for (int j=0; j<n_clusters_; j++)
                if ((dist = powdistance(clusters_(j), input, 2)) < bestdist)
                {
                    bestdist = dist;
                    bestclust = j;
                }

            cluster_idx[i] = bestclust;
            samples_per_cluster[bestclust] += weight;
            new_clusters(bestclust) += input * weight;
            train_costs[0] += bestdist;
        }

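        // train_costs[0] now holds the sum over all examples of the squared
        // distance to their assigned centroid; report its mean over the set.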
        train_costs[0] /= train_set.length();

        // Update train statistics.
        train_stats->update(train_costs);
        train_stats->finalize(); // finalize statistics for this epoch

        // Compute new centroids.
        for (int i=0; i<n_clusters_; i++)
            if (samples_per_cluster[i]>0)
                new_clusters(i) /= samples_per_cluster[i];
        clusters_ << new_clusters;

        // Check if things have changed (if not, stop training).
        stop=true;
        if (n_clusters_ > 1)
            for (int i=0;i<train_set.length();i++)
                if (old_cluster_idx[i] != cluster_idx[i])
                {
                    stop=false;
                    break;
                }

        ++stage; // next stage
    }
}


void KMeansClustering::computeOutput(const Vec& input, Vec& output) const
{
    // Compute the output from the input.
    int nout = outputsize();
    output.resize(nout);

    for (int j=0; j<n_clusters_; j++)
        output[j] = -powdistance(clusters_(j), input, 2);
}

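// The outputs are negative squared distances, so the centroid with the largest
// output is the nearest one; the cost reported below is therefore the squared
// distance from the input to its closest centroid.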
void KMeansClustering::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                               const Vec& target, Vec& costs) const
{
    // Compute the costs from *already* computed output.
    costs.resize(1);
    int cluster = argmax(output);

    costs[0] = - output[cluster];
}

TVec<string> KMeansClustering::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by getTrainCostNames).
    return TVec<string>(1, "squared_reconstruction_error");
}

TVec<string> KMeansClustering::getTrainCostNames() const
{
    // Return the names of the objective costs that the train method computes and
    // for which it updates the VecStatsCollector train_stats
    // (these may or may not be exactly the same as what's returned by getTestCostNames).
    return getTestCostNames();
}


} // end of namespace PLearn
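
// Minimal usage sketch (illustrative only, not part of the library). It assumes
// the standard PLearner workflow, that the build options are publicly accessible
// fields (as is conventional for PLearn learners), and a VMat 'data' with a
// matching inputsize; setTrainingSet() normally calls forget(), which draws the
// initial centroids from 'data'.
//
//   PP<KMeansClustering> km = new KMeansClustering();
//   km->n_clusters_ = 5;        // build option "n_clusters"
//   km->nstages = 100;          // upper bound on the number of k-means iterations
//   km->build();
//   km->setTrainingSet(data);
//   km->train();                // runs until convergence or nstages
//
//   Vec output(km->outputsize());
//   km->computeOutput(some_input, output);  // 'some_input': a Vec of size inputsize()
//   int nearest = argmax(output);           // index of the closest centroid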


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :