MixtureDistribution.cc
// -*- C++ -*-

// MixtureDistribution.cc
//
// Copyright (C) 2008 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Olivier Delalleau

#include "MixtureDistribution.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    MixtureDistribution,
    "Weighted mixture of n distributions.",
    "Note that the weights are fixed and not learnt."
);
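
// A MixtureDistribution models the density
//     p(y) = sum_i weights[i] * p_i(y),
// where p_i is the density of distributions[i]; the weights are fixed build
// options and are not learnt (see train() and log_density() below).
//
// Minimal usage sketch (illustration only; it assumes a vector of already
// built components 'comps' of type TVec< PP<PDistribution> > and relies on
// the usual PLearn convention that build options are public data members):
//
//     PP<MixtureDistribution> mix = new MixtureDistribution();
//     mix->distributions = comps;
//     // Leaving 'weights' empty gives each component weight 1/n.
//     mix->build();
//     real log_p = mix->log_density(some_test_point);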

// MixtureDistribution //
MixtureDistribution::MixtureDistribution()
{}

// declareOptions //
void MixtureDistribution::declareOptions(OptionList& ol)
{
    declareOption(ol, "distributions", &MixtureDistribution::distributions,
                  OptionBase::buildoption,
        "Underlying distributions being mixed.");

    declareOption(ol, "weights", &MixtureDistribution::weights,
                  OptionBase::buildoption,
        "Weights of the distributions (must sum to 1). If left empty, then\n"
        "each distribution will be given a weight 1/number_of_distributions.");

    // Now call the parent class' declareOptions().
    inherited::declareOptions(ol);

    // Hide unused options.

    redeclareOption(ol, "predicted_size",
                    &MixtureDistribution::predicted_size,
                    OptionBase::nosave,
        "Unused");

    redeclareOption(ol, "predictor_part",
                    &MixtureDistribution::predictor_part,
                    OptionBase::nosave,
        "Unused");

    redeclareOption(ol, "predictor_size",
                    &MixtureDistribution::predictor_size,
                    OptionBase::nosave,
        "Unused");
}

// build //
void MixtureDistribution::build()
{
    inherited::build();
    build_();
}

// build_ //
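// If 'weights' is left empty, give each of the n distributions the default
// weight 1/n, then check that there is exactly one weight per distribution
// and that the weights sum to 1.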
void MixtureDistribution::build_()
{
    if (distributions.isEmpty())
        return;
    if (weights.isEmpty()) {
        int n = distributions.length();
        weights.resize(n);
        weights.fill(1 / real(n));
    }
    PLCHECK_MSG(weights.length() == distributions.length() &&
                is_equal(PLearn::sum(weights), 1),
                "There must be one weight for each distribution, and the "
                "weights must sum to 1");
    getSizes();
}

// cdf //
real MixtureDistribution::cdf(const Vec& y) const
{
    PLERROR("cdf not implemented for MixtureDistribution");
    return 0;
}

// expectation //
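// The expectation of the mixture is the weighted sum of the component
// expectations: E[Y] = sum_i weights[i] * E_i[Y].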
void MixtureDistribution::expectation(Vec& mu) const
{
    PLASSERT( !distributions.isEmpty() );
    mu.resize(distributions[0]->getNPredicted());
    mu.fill(0);
    for (int i = 0; i < distributions.length(); i++) {
        distributions[i]->expectation(work);
        multiplyAcc(mu, work, weights[i]);
    }
}

// forget //
void MixtureDistribution::forget()
{
    for (int i = 0; i < distributions.length(); i++)
        distributions[i]->forget();
    inherited::forget();
    getSizes();
}

// generate //
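// To draw one sample, first pick a component index according to the
// multinomial defined by 'weights', then sample from that component.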
void MixtureDistribution::generate(Vec& y) const
{
    int j = random_gen->multinomial_sample(weights);
    distributions[j]->generate(y);
}

// getSizes //
void MixtureDistribution::getSizes() const {
    PLASSERT( !distributions.isEmpty() );
    n_predicted = distributions[0]->getNPredicted();
    n_predictor = distributions[0]->getNPredictor();
}

// log_density //
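// log p(y) = log( sum_i weights[i] * p_i(y) ), computed in a numerically
// stable way by applying logadd() to the terms log p_i(y) + log weights[i].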
real MixtureDistribution::log_density(const Vec& y) const
{
    int n = distributions.length();
    work.resize(n);
    for (int i = 0; i < n; i++) {
        work[i] = distributions[i]->log_density(y) + pl_log(weights[i]);
    }
    return logadd(work);
}

// makeDeepCopyFromShallowCopy //
void MixtureDistribution::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // Deep-copy the "pointer-like" fields so that the copy does not share
    // storage with the object it was shallow-copied from.
    deepCopyField(distributions, copies);
    deepCopyField(weights, copies);
    deepCopyField(work, copies);
}

// resetGenerator //
void MixtureDistribution::resetGenerator(long g_seed)
{
    for (int i = 0; i < distributions.length(); i++)
        distributions[i]->resetGenerator(g_seed);
    inherited::resetGenerator(g_seed);
}

// setPredictor //
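// Forward the predictor part to every component distribution (and, when
// 'call_parent' is true, to this object through the parent class as well).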
void MixtureDistribution::setPredictor(const Vec& predictor, bool call_parent) const
{
    if (call_parent)
        inherited::setPredictor(predictor, true);
    for (int i = 0; i < distributions.length(); i++)
        distributions[i]->setPredictor(predictor, call_parent);
    getSizes();
}

// setPredictorPredictedSizes //
bool MixtureDistribution::setPredictorPredictedSizes(int the_predictor_size,
                                                     int the_predicted_size,
                                                     bool call_parent)
{
    bool sizes_have_changed = false;
    if (call_parent)
        sizes_have_changed = inherited::setPredictorPredictedSizes(
                the_predictor_size, the_predicted_size, true);
    for (int i = 0; i < distributions.length(); i++)
        distributions[i]->setPredictorPredictedSizes(the_predictor_size,
                                                     the_predicted_size,
                                                     call_parent);
    getSizes();
    // Returned value.
    return sizes_have_changed;
}

// survival_fn //
real MixtureDistribution::survival_fn(const Vec& y) const
{
    PLERROR("survival_fn not implemented for MixtureDistribution");
    return 0;
}

// train //
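// Training simply trains each component distribution in turn; the mixture
// weights are fixed build options and are not learnt.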
void MixtureDistribution::train()
{
    // This generic PLearner method performs a number of standard operations
    // useful for (almost) any learner, and returns 'false' if no training
    // should take place. See PLearner.h for more details.
    if (!initTrain())
        return;

    PLCHECK( nstages == 1 && stage == 0 );
    for (int i = 0; i < distributions.length(); i++)
        distributions[i]->train();
    stage = 1;
    getSizes();
}

// variance //
void MixtureDistribution::variance(Mat& covar) const
{
    PLERROR("variance not implemented for MixtureDistribution");
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :