// -*- C++ -*-

// ConditionalDensityNet.cc
//
// Copyright (C) 2003 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org
/* *******************************************************
 * $Id: ConditionalDensityNet.cc 9418 2008-09-02 15:33:46Z nouiz $
 ******************************************************* */

// Authors: Yoshua Bengio

#include <plearn/var/AffineTransformVariable.h>
#include <plearn/var/AffineTransformWeightPenalty.h>
#include "ConditionalDensityNet.h"
#include <plearn/var/ConcatColumnsVariable.h>
#include <plearn/var/ConcatRowsVariable.h>
#include <plearn/var/CutBelowThresholdVariable.h>
#include <plearn/display/DisplayUtils.h>
#include <plearn/var/DotProductVariable.h>
#include <plearn/var/IfThenElseVariable.h>
#include <plearn/var/IsAboveThresholdVariable.h>
#include <plearn/var/LogVariable.h>
//#include "DilogarithmVariable.h"
#include <plearn/var/SoftSlopeVariable.h>
#include <plearn/var/SoftSlopeIntegralVariable.h>
//#include "plapack.h"
#include <plearn/var/SoftplusVariable.h>
#include <plearn/var/SubMatTransposeVariable.h>
#include <plearn/var/SubMatVariable.h>
#include <plearn/var/SumAbsVariable.h>
#include <plearn/var/SumOfVariable.h>
#include <plearn/var/SumSquareVariable.h>
#include <plearn/var/SumVariable.h>
#include <plearn/var/TanhVariable.h>
#include <plearn/var/TransposeProductVariable.h>
#include <plearn/math/random.h>

namespace PLearn {
using namespace std;

ConditionalDensityNet::ConditionalDensityNet()
    : nhidden(0),
      nhidden2(0),
      weight_decay(0),
      bias_decay(1e-6),
      layer1_weight_decay(0),
      layer1_bias_decay(0),
      layer2_weight_decay(0),
      layer2_bias_decay(0),
      output_layer_weight_decay(0),
      output_layer_bias_decay(0),
      direct_in_to_out_weight_decay(0),
      penalty_type("L2_square"),
      L1_penalty(false),
      direct_in_to_out(false),
      batch_size(1),
      c_penalization(0),
      maxY(1), // if Y is normalized to be in interval [0,1], that would be OK
      thresholdY(0.1),
      log_likelihood_vs_squared_error_balance(1),
      separate_mass_point(1),
      n_output_density_terms(0),
      generate_precision(1e-3),
      steps_type("sloped_steps"),
      centers_initialization("data"),
      curve_positions("uniform"),
      scale(5.0),
      unconditional_p0(0.01),
      mu_is_fixed(true),
      initial_hardness(1)
{}

PLEARN_IMPLEMENT_OBJECT(ConditionalDensityNet, "Neural Network that Implements a Positive Random Variable Conditional Density",
                        "The input vector is used to compute parameters of an output density or output\n"
                        "cumulative distribution as well as output expected value. The ASSUMPTIONS\n"
                        "on the generating distribution P(Y|X) are the following:\n"
                        "  * Y is a single real value\n"
                        "  * 0 <= Y <= maxY, with maxY a known finite value\n"
                        "  * the density has a mass point at Y=0\n"
                        "  * the density is continuous for Y>0\n"
                        "The form of the conditional cumulative of Y is the following (separate_mass_point=false):\n"
                        "   P(Y<=y|theta) = (1/Z) (s(a) + sum_i u_i s(b_i) g(y,theta,i))\n"
                        "or for separate_mass_point=true:\n"
                        "   P(Y<=y|theta) = sigmoid(a) + (1-sigmoid(a))(sum_i u_i s(b_i) (g(y,theta,i)-g(0,theta,i)))/Z\n"
                        "where s(z)=log(1+exp(z)) is the softplus function, and g is a monotonic function\n"
                        "in y whose first derivative and indefinite integral are known analytically.\n"
                        "The u_i are fixed from the unconditional distribution, such that s(b_i)=1 gives\n"
                        "approximately the right unconditional cumulative function (for infinite hardness):\n"
                        "  u_i = P(mu_{i-1}<Y<=mu_i) [unconditional].\n"
                        "The parameters theta of Y's distribution are (a,b_1,b_2,...,c_1,c_2,...,mu_1,mu_2,...),\n"
                        "which are obtained as the unconstrained outputs (no output transfer function) of a neural network.\n"
                        "The normalization constant Z is easily computed analytically: (separate_mass_point=false)\n"
                        "   Z = s(a) + sum_i u_i s(b_i) g(maxY,theta,i)\n"
                        "or for separate_mass_point=true:\n"
                        "   Z = sum_i u_i s(b_i) (g(maxY,theta,i)-g(0,theta,i))\n"
                        "The current implementation considers two choices for g:\n"
                        "  - sigmoid_steps: g(y,theta,i) = sigmoid(h*s(c_i)*(y-mu_i)/(mu_{i+1}-mu_i))\n"
                        "  - sloped_steps: g(y,theta,i) = 1 + (s(s(c_i)*(mu_i-y))-s(s(c_i)*(mu_{i+1}-y)))/(s(c_i)*(mu_{i+1}-mu_i))\n"
                        "where h is the 'initial_hardness' option.\n"
                        "The density is analytically obtained using the derivative g' of g and the\n"
                        "expected value is analytically obtained using the primitive G of g.\n"
                        "For the mass point at the origin,\n"
                        "   P(Y=0|theta) = P(Y<=0|theta)\n"
                        "(which is simply sigmoid(a) if separate_mass_point).\n"
                        "For positive values of Y: (separate_mass_point=false)\n"
                        "   p(y|theta) = (1/Z) sum_i u_i s(b_i) g'(y,theta,i)\n"
                        "or for separate_mass_point=true:\n"
                        "   p(y|theta) = (1-sigmoid(a)) (1/Z) sum_i u_i s(b_i) g'(y,theta,i).\n"
                        "And the expected value of Y is obtained using the primitive: (separate_mass_point=false)\n"
                        "   E[Y|theta] = (1/Z)*(s(a)*M + sum_i u_i s(b_i)(G(M,theta,i)-G(0,theta,i)))\n"
                        "or for separate_mass_point=true:\n"
                        "   E[Y|theta] = M - ((sigmoid(a)-(1-sigmoid(a))*(1/Z)*sum_i u_i s(b_i)g(0,theta,i))*M + (1-sigmoid(a))*(1/Z)*sum_i u_i s(b_i)(G(M,theta,i)-G(0,theta,i)))\n"
                        "Training the model can be done by maximum likelihood (minimizing the negative log of the\n"
                        "density) or by minimizing the average squared error (y-E[Y|theta])^2,\n"
                        "or a combination of the two (with the log_likelihood_vs_squared_error_balance option).\n"
                        "The step 'centers' mu_i are initialized according to some rule, in the interval [0,maxY]:\n"
                        " - uniform: at regular intervals in [0,maxY]\n"
                        " - log-scale: as the exponential of values at regular intervals in [0,log(1+maxY)], minus 1.\n"
                        "The c_i and b_i are initialized to inverse_softplus(1), and a is initialized using the empirical unconditional P(Y=0).\n"
                        "For the output curve options (outputs_def='L','D','C', or 'S'), the lower_bound and upper_bound\n"
                        "options of PDistribution are automatically set to 0 and maxY respectively.\n"
    );
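// Worked example (illustrative, derived from the class help above): with
// separate_mass_point=true and a single density term (n_output_density_terms=1),
// the factor u_1 s(b_1) cancels between the numerator and Z, leaving
//    P(Y<=y|theta) = sigmoid(a) + (1-sigmoid(a)) * (g(y)-g(0)) / (g(maxY)-g(0)),
// i.e. a point mass sigmoid(a) at 0 plus a single monotonic ramp over [0,maxY].
// With more terms, the softplus weights s(b_i) mix several such ramps, each
// centered at one of the mu_i.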

void ConditionalDensityNet::declareOptions(OptionList& ol)
{
    declareOption(ol, "nhidden", &ConditionalDensityNet::nhidden, OptionBase::buildoption,
                  "    number of hidden units in first hidden layer (0 means no hidden layer)\n");

    declareOption(ol, "nhidden2", &ConditionalDensityNet::nhidden2, OptionBase::buildoption,
                  "    number of hidden units in second hidden layer (0 means no hidden layer)\n");

    declareOption(ol, "weight_decay", &ConditionalDensityNet::weight_decay, OptionBase::buildoption,
                  "    global weight decay for all layers\n");

    declareOption(ol, "bias_decay", &ConditionalDensityNet::bias_decay, OptionBase::buildoption,
                  "    global bias decay for all layers\n");

    declareOption(ol, "layer1_weight_decay", &ConditionalDensityNet::layer1_weight_decay, OptionBase::buildoption,
                  "    Additional weight decay for the first hidden layer.  Is added to weight_decay.\n");
    declareOption(ol, "layer1_bias_decay", &ConditionalDensityNet::layer1_bias_decay, OptionBase::buildoption,
                  "    Additional bias decay for the first hidden layer.  Is added to bias_decay.\n");

    declareOption(ol, "layer2_weight_decay", &ConditionalDensityNet::layer2_weight_decay, OptionBase::buildoption,
                  "    Additional weight decay for the second hidden layer.  Is added to weight_decay.\n");

    declareOption(ol, "layer2_bias_decay", &ConditionalDensityNet::layer2_bias_decay, OptionBase::buildoption,
                  "    Additional bias decay for the second hidden layer.  Is added to bias_decay.\n");

    declareOption(ol, "output_layer_weight_decay", &ConditionalDensityNet::output_layer_weight_decay, OptionBase::buildoption,
                  "    Additional weight decay for the output layer.  Is added to 'weight_decay'.\n");

    declareOption(ol, "output_layer_bias_decay", &ConditionalDensityNet::output_layer_bias_decay, OptionBase::buildoption,
                  "    Additional bias decay for the output layer.  Is added to 'bias_decay'.\n");

    declareOption(ol, "direct_in_to_out_weight_decay", &ConditionalDensityNet::direct_in_to_out_weight_decay, OptionBase::buildoption,
                  "    Additional weight decay for the direct in-to-out layer.  Is added to 'weight_decay'.\n");

    declareOption(ol, "penalty_type", &ConditionalDensityNet::penalty_type,
                  OptionBase::buildoption,
                  "Penalty to use on the weights (for weight and bias decay).\n"
                  "Can be any of:\n"
                  "  - \"L1\": L1 norm,\n"
                  "  - \"L1_square\": square of the L1 norm,\n"
                  "  - \"L2_square\" (default): square of the L2 norm.\n");

    declareOption(ol, "L1_penalty", &ConditionalDensityNet::L1_penalty, OptionBase::buildoption,
                  "Deprecated - You should use \"penalty_type\" instead\n"
                  "should we use L1 penalty instead of the default L2 penalty on the weights?\n");

    declareOption(ol, "direct_in_to_out", &ConditionalDensityNet::direct_in_to_out, OptionBase::buildoption,
                  "    should we include direct input to output connections? (default=0)\n");

    declareOption(ol, "optimizer", &ConditionalDensityNet::optimizer, OptionBase::buildoption,
                  "    specify the optimizer to use\n");

    declareOption(ol, "batch_size", &ConditionalDensityNet::batch_size, OptionBase::buildoption,
                  "    how many samples to use to estimate the average gradient before updating the weights\n"
                  "    0 is equivalent to specifying training_set->length(); default=1 (stochastic gradient)\n");

    declareOption(ol, "maxY", &ConditionalDensityNet::maxY, OptionBase::buildoption,
                  "    maximum allowed value for Y. Default = 1.0 (data normalized in [0,1])\n");

    declareOption(ol, "thresholdY", &ConditionalDensityNet::thresholdY, OptionBase::buildoption,
                  "    threshold value of Y for which we might want to compute P(Y>thresholdY), with outputs_def='t'\n");

    declareOption(ol, "log_likelihood_vs_squared_error_balance", &ConditionalDensityNet::log_likelihood_vs_squared_error_balance,
                  OptionBase::buildoption,
                  "    Relative weight given to negative log-likelihood (1- this weight given squared error). Default=1\n");

    declareOption(ol, "n_output_density_terms", &ConditionalDensityNet::n_output_density_terms,
                  OptionBase::buildoption,
                  "    Number of terms (steps) in the output density function.\n");

    declareOption(ol, "steps_type", &ConditionalDensityNet::steps_type,
                  OptionBase::buildoption,
                  "    The type of steps used to build the cumulative distribution.\n"
                  "    Allowed values are:\n"
                  "      - sigmoid_steps: g(y,theta,i) = sigmoid(s(c_i)*(y-mu_i))\n"
                  "      - sloped_steps: g(y,theta,i) = s(s(c_i)*(mu_i-y))-s(s(c_i)*(mu_{i+1}-y))\n"
                  "    Default=sloped_steps\n");
00231 
00232     declareOption(ol, "centers_initialization", &ConditionalDensityNet::centers_initialization, 
00233                   OptionBase::buildoption, 
00234                   "    How to initialize the step centers (mu_i). Allowed values are:\n"
00235                   "      - data: from the data at regular quantiles, with last one at maxY (default)\n"
00236                   "      - uniform: at regular intervals in [0,maxY]\n"
00237                   "      - log-scale: as the exponential of values at regular intervals in log-scale, using formula:\n"
00238                   "          i-th position = (exp(scale*(i+1-n_output_density_terms)/n_output_density_terms)-exp(-scale))/(1-exp(-scale))\n");
00239     declareOption(ol, "curve_positions", &ConditionalDensityNet::curve_positions,
00240                   OptionBase::buildoption, 
00241                   "    How to choose the y-values for the probability curve (upper case output_def):\n"
00242                   "      - uniform: at regular intervals in [0,maxY]\n"
00243                   "      - log-scale: as the exponential of values at regular intervals in log-scale, using formula:\n"
00244                   "          i-th position = (exp(scale*(i+1-n_output_density_terms)/n_output_density_terms)-exp(-scale))/(1-exp(-scale))\n");
00245     declareOption(ol, "scale", &ConditionalDensityNet::scale,
00246                   OptionBase::buildoption, 
00247                   "    scale used in the log-scale formula for centers_initialization and curve_positions");
00248 
00249     declareOption(ol, "unconditional_p0", &ConditionalDensityNet::unconditional_p0, OptionBase::buildoption, 
00250                   "    approximate unconditional probability of Y=0 (mass point), used\n"
00251                   "    to initialize the parameters.\n");
00252 
00253     declareOption(ol, "mu_is_fixed", &ConditionalDensityNet::mu_is_fixed, OptionBase::buildoption, 
00254                   "    whether to keep the step centers (mu[i]) fixed or to learn them.\n");
00255 
00256     declareOption(ol, "separate_mass_point", &ConditionalDensityNet::separate_mass_point, OptionBase::buildoption, 
00257                   "    whether to model separately the mass point at the origin.\n");
00258 
00259     declareOption(ol, "initial_hardness", &ConditionalDensityNet::initial_hardness, OptionBase::buildoption, 
00260                   "    value that scales softplus(c).\n");
00261 
00262     declareOption(ol, "c_penalization", &ConditionalDensityNet::c_penalization, OptionBase::buildoption, 
00263                   "    the penalization coefficient for the 'c' output of the neural network");
00264 
00265     declareOption(ol, "generate_precision", &ConditionalDensityNet::generate_precision, OptionBase::buildoption, 
00266                   "    precision when generating a new sample\n");
00267 
00268     declareOption(ol, "paramsvalues", &ConditionalDensityNet::paramsvalues, OptionBase::learntoption, 
00269                   "    The learned neural network parameter vector\n");
00270 
00271     declareOption(ol, "unconditional_cdf", &ConditionalDensityNet::unconditional_cdf, OptionBase::learntoption, 
00272                   "    Unconditional cumulative distribution function.\n");
00273 
00274     declareOption(ol, "unconditional_delta_cdf", &ConditionalDensityNet::unconditional_delta_cdf, OptionBase::learntoption, 
00275                   "    Variations of the cdf from one step center to the next (this is u_i in above eqns).\n");
00276 
00277     declareOption(ol, "mu", &ConditionalDensityNet::mu, OptionBase::learntoption, 
00278                   "    Step centers.\n");
00279 
00280     declareOption(ol, "y_values", &ConditionalDensityNet::y_values, OptionBase::learntoption, 
00281                   "    Values of Y at which the cumulative (or density or survival) curves are computed if required.\n");
00282 
00283     inherited::declareOptions(ol);
00284 }
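
// Hedged usage sketch (not part of the original source): one plausible way to
// configure this learner programmatically. The option names match the
// declareOptions above; the optimizer and training VMat ('some_optimizer',
// 'trainvm') are hypothetical placeholders that must be supplied by the caller.
//
//   PP<ConditionalDensityNet> net = new ConditionalDensityNet();
//   net->nhidden = 10;                    // one hidden layer of 10 tanh units
//   net->n_output_density_terms = 5;      // 5 steps in the cumulative
//   net->maxY = 1.0;                      // targets assumed normalized to [0,1]
//   net->steps_type = "sloped_steps";
//   net->centers_initialization = "data"; // mu_i from empirical quantiles
//   net->optimizer = some_optimizer;      // e.g. a GradientOptimizer
//   net->setTrainingSet(trainvm);
//   net->build();
//   net->train();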

/*
  int ConditionalDensityNet::outputsize() const
  {
  int target_size = targetsize_<0?(train_set?train_set->targetsize():1):targetsize_;
  int l=0;
  for (unsigned int i=0;i<outputs_def.length();i++)
  if (outputs_def[i]=='L' || outputs_def[i]=='D' || outputs_def[i]=='C' || outputs_def[i]=='S')
  l+=n_curve_points;
  else if (outputs_def[i]=='e')
  l+=target_size;
  else if (outputs_def[i]=='v') // by default assume variance is full nxn matrix
  l+=target_size*target_size;
  else l++;
  return l;
  }
*/

// build_ //
void ConditionalDensityNet::build_()
{
    if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
    {
        lower_bound = 0;
        upper_bound = maxY;
        int n_output_parameters = mu_is_fixed?(1+n_output_density_terms*2):(1+n_output_density_terms*3);

        if (n_curve_points<0)
            n_curve_points = n_output_density_terms+1;

        // init. basic vars
        input = Var(n_input, "input");
        output = input;
        params.resize(0);

        // first hidden layer
        if(nhidden>0)
        {
            w1 = Var(1+n_input, nhidden, "w1");
            output = tanh(affine_transform(output,w1));
            params.append(w1);
        }

        // second hidden layer
        if(nhidden2>0)
        {
            w2 = Var(1+nhidden, nhidden2, "w2");
            output = tanh(affine_transform(output,w2));
            params.append(w2);
        }

        if (nhidden2>0 && nhidden==0)
            PLERROR("ConditionalDensityNet:: can't have nhidden2 (=%d) > 0 while nhidden=0",nhidden2);

        if (nhidden==-1)
            // special code meaning that the inputs should be ignored, only use biases
        {
            wout = Var(1, n_output_parameters, "wout");
            output = transpose(wout);
        }
        // output layer before transfer function
        else
        {
            wout = Var(1+output->size(), n_output_parameters, "wout");
            output = affine_transform(output,wout);
        }
        params.append(wout);

        // direct in-to-out layer
        if(direct_in_to_out)
        {
            wdirect = Var(n_input, n_output_parameters, "wdirect");
            //wdirect = Var(1+inputsize(), n_output_parameters, "wdirect");
            output += transposeProduct(wdirect, input);// affine_transform(input,wdirect);
            params.append(wdirect);
        }

        /*
         * target and weights
         */

        target = Var(n_target, "target");

        if(weightsize_>0)
        {
            if (weightsize_!=1)
                PLERROR("ConditionalDensityNet: expected weightsize to be 1 or 0 (or unspecified = -1, meaning 0), got %d",weightsize_);
            sampleweight = Var(1, "weight");
        }
        // output = parameters of the Y distribution

        int i=0;
        a = output[i++]; a->setName("a");
        //b = new SubMatVariable(output,0,i,1,n_output_density_terms);
        b = new SubMatVariable(output,i,0,n_output_density_terms,1);
        b->setName("b");
        i+=n_output_density_terms;
        //c = new SubMatVariable(output,0,i,1,n_output_density_terms);
        c = new SubMatVariable(output,i,0,n_output_density_terms,1);
        c->setName("c");

        // we don't want to clear mu if this build is called
        // just after a load(), because mu is a learnt option
        if (!mu || (mu->length()!=n_output_density_terms && train_set))
        {
            if (mu_is_fixed)
                //mu = Var(1,n_output_density_terms);
                mu = Var(n_output_density_terms,1);
            else
            {
                i+=n_output_density_terms;
                //mu = new SubMatVariable(output,0,i,1,n_output_density_terms);
                mu = new SubMatVariable(output,i,0,n_output_density_terms,1);
            }
        }
        mu->setName("mu");

        /*
         * output density
         */
        Var nll; // negative log likelihood
        Var max_y = var(maxY);
        Var left_side = vconcat(var(0.0) & (new SubMatVariable(mu,0,0,n_output_density_terms-1,1)));
        centers = target-mu;
        centers_M = max_y-mu;
        unconditional_cdf.resize(n_output_density_terms);
        if (unconditional_delta_cdf)
        {
            // don't clear it if this build is called just after a load
            if (unconditional_delta_cdf.length()!=n_output_density_terms)
                unconditional_delta_cdf->resize(n_output_density_terms,1);
        }
        else
            unconditional_delta_cdf = Var(n_output_density_terms,1);
        initial_hardnesses = var(initial_hardness) / (mu - left_side);
        pos_b = softplus(b)*unconditional_delta_cdf;
        pos_c = softplus(c)*initial_hardnesses;
        Var scaled_centers = pos_c*centers;
        // scaled centers evaluated at target = M
        Var scaled_centers_M = pos_c*centers_M;
        // scaled centers evaluated at target = 0
        Var scaled_centers_0 = -pos_c*mu;
        Var lhopital, inverse_denominator, density_numerator;
        if (separate_mass_point)
        {
            pos_a = sigmoid(a);
            if (steps_type=="sigmoid_steps")
            {
                steps = sigmoid(scaled_centers);
                // steps evaluated at target = M
                steps_M = sigmoid(scaled_centers_M);
                steps_0 = sigmoid(scaled_centers_0);
                // derivative of steps wrt target
                steps_gradient = pos_c*steps*(1-steps);
                steps_integral = (softplus(scaled_centers_M) - softplus(scaled_centers_0))/pos_c;
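                // Closed form for the integral of each sigmoid step over [0,maxY]:
                // since d/dy softplus(pos_c*(y-mu)) = pos_c*sigmoid(pos_c*(y-mu)),
                // the integral is (softplus(pos_c*(maxY-mu)) - softplus(-pos_c*mu))/pos_c,
                // which is the expression above.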
                delta_steps = centers_M*steps_M + mu*sigmoid(scaled_centers_0);
            }
            else if (steps_type=="sloped_steps")
            {
                steps = soft_slope(target, pos_c, left_side, mu);
                steps_M = soft_slope(max_y, pos_c, left_side, mu);
                steps_0 = soft_slope(var(0.0), pos_c, left_side, mu);
                steps_gradient = d_soft_slope(target, pos_c, left_side, mu);
                steps_integral = soft_slope_integral(pos_c,left_side,mu,0.0,maxY);
                delta_steps = soft_slope_limit(target, pos_c, left_side, mu);
            }
            else PLERROR("ConditionalDensityNet::build, steps_type option value unknown: %s",steps_type.c_str());

            density_numerator = dot(pos_b,steps_gradient);
            cum_denominator = dot(pos_b,positive(steps_M-steps_0));
            inverse_denominator = 1.0/cum_denominator;
            cum_numerator = dot(pos_b,(steps-steps_0));
            cumulative = pos_a + (1-pos_a) * cum_numerator * inverse_denominator;
            density = density_numerator * inverse_denominator; // this is the conditional density for Y>0
            // apply l'hopital rule if pos_c --> 0 to avoid blow-up (N.B. lim_{pos_c->0} pos_b/pos_c*steps_integral = pos_b*delta_steps)
            lhopital = ifThenElse(isAboveThreshold(pos_c,1e-20),steps_integral,delta_steps);
            expected_value = max_y - ((pos_a-(1-pos_a)*inverse_denominator*dot(pos_b,steps_0))*max_y +
                                      (1-pos_a)*dot(pos_b,lhopital)*inverse_denominator);
            mass_cost = -log(ifThenElse(isAboveThreshold(target,0.0,1,0,true),(1-pos_a),pos_a));
            pos_y_cost = ifThenElse(isAboveThreshold(target,0.0,1,0,true),-log(density),var(0.0));
            nll = -log(ifThenElse(isAboveThreshold(target,0.0,1,0,true),density*(1-pos_a),pos_a));
        }
        else
        {
            pos_a = var(0.0);
            if (steps_type=="sigmoid_steps")
            {
                steps = sigmoid(scaled_centers);
                // steps evaluated at target = M
                steps_M = sigmoid(scaled_centers_M);
                steps_0 = sigmoid(scaled_centers_0);
                // derivative of steps wrt target
                steps_gradient = pos_c*steps*(1-steps);
                steps_integral = (softplus(scaled_centers_M) - softplus(scaled_centers_0))/pos_c;
                delta_steps = centers_M*steps_M + mu*sigmoid(scaled_centers_0);
            }
            else if (steps_type=="sloped_steps")
            {
                steps = soft_slope(target, pos_c, left_side, mu);
                steps_M = soft_slope(max_y, pos_c, left_side, mu);
                steps_0 = soft_slope(var(0.0), pos_c, left_side, mu);
                steps_gradient = d_soft_slope(target, pos_c, left_side, mu);
                steps_integral = soft_slope_integral(pos_c,left_side,mu,0.0,maxY);
                delta_steps = soft_slope_limit(target, pos_c, left_side, mu);
            }
            else PLERROR("ConditionalDensityNet::build, steps_type option value unknown: %s",steps_type.c_str());

            density_numerator = dot(pos_b,steps_gradient);
            cum_denominator = dot(pos_b,steps_M - steps_0);
            inverse_denominator = 1.0/cum_denominator;
            cum_numerator = dot(pos_b,steps - steps_0);
            cumulative = cum_numerator * inverse_denominator;
            density = density_numerator * inverse_denominator;
            // apply l'hopital rule if pos_c --> 0 to avoid blow-up (N.B. lim_{pos_c->0} pos_b/pos_c*steps_integral = pos_b*delta_steps)
            lhopital = ifThenElse(isAboveThreshold(pos_c,1e-20),steps_integral,delta_steps);
            expected_value = dot(pos_b,lhopital)*inverse_denominator;
            nll = -log(ifThenElse(isAboveThreshold(target,0.0,1,0,true),density,cumulative));
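            // Without a separate mass point, P(Y=0|theta) is taken to be the
            // cumulative evaluated at 0 (see class help), hence the nll uses
            // 'cumulative' rather than 'density' when target==0.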
        }
        max_y->setName("maxY");
        left_side->setName("left_side");
        pos_a->setName("pos_a");
        pos_b->setName("pos_b");
        pos_c->setName("pos_c");
        steps->setName("steps");
        steps_M->setName("steps_M");
        steps_integral->setName("steps_integral");
        expected_value->setName("expected_value");
        density_numerator->setName("density_numerator");
        cum_denominator->setName("cum_denominator");
        inverse_denominator->setName("inverse_denominator");
        cum_numerator->setName("cum_numerator");
        cumulative->setName("cumulative");
        density->setName("density");
        lhopital->setName("lhopital");

        /*
         * cost functions:
         *   training_criterion = log_likelihood_vs_squared_error_balance*neg_log_lik
         *                      +(1-log_likelihood_vs_squared_error_balance)*squared_err
         *                      +penalties
         *   neg_log_lik = -log(1_{target=0} cumulative + 1_{target>0} density)
         *   squared_err = square(target - expected_value)
         */
        costs.resize(3);
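        // Cost layout (see getTrainCostNames/getTestCostNames):
        //   costs[0] = training criterion (NLL/MSE balance, plus optional c penalty),
        //   costs[1] = negative log-likelihood (NLL),
        //   costs[2] = squared error (mse).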

        costs[1] = nll;
        costs[2] = square(target-expected_value);
        // for debugging gradient computation error
        if (fast_exact_is_equal(log_likelihood_vs_squared_error_balance, 1))
            costs[0] = costs[1];
        else if (fast_exact_is_equal(log_likelihood_vs_squared_error_balance, 0))
            costs[0] = costs[2];
        else costs[0] = log_likelihood_vs_squared_error_balance*costs[1]+
                 (1-log_likelihood_vs_squared_error_balance)*costs[2];
        if (c_penalization > 0) {
            costs[0] = costs[0] + c_penalization * sumsquare(c);
        }

        // for debugging
        //costs[0] = mass_cost + pos_y_cost;
        //costs[1] = mass_cost;
        //costs[2] = pos_y_cost;

        /*
         * weight and bias decay penalty
         */
        if( L1_penalty )
        {
            PLDEPRECATED("Option \"L1_penalty\" deprecated. Please use \"penalty_type = L1\" instead.");
            L1_penalty = 0;
            penalty_type = "L1";
        }

        string pt = lowerstring( penalty_type );
        if( pt == "l1" )
            penalty_type = "L1";
        else if( pt == "l1_square" || pt == "l1 square" || pt == "l1square" )
            penalty_type = "L1_square";
        else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
            penalty_type = "L2_square";
        else if( pt == "l2" )
        {
            PLWARNING("L2 penalty not supported, assuming you want L2 square");
            penalty_type = "L2_square";
        }
        else
            PLERROR("penalty_type \"%s\" not supported", penalty_type.c_str());

        // create penalties
        penalties.resize(0);  // prevents penalties from being added twice by consecutive builds
        if(w1 && (!fast_exact_is_equal(layer1_weight_decay + weight_decay, 0) ||
                  !fast_exact_is_equal(layer1_bias_decay   + bias_decay,   0)))
            penalties.append(affine_transform_weight_penalty(w1, (layer1_weight_decay + weight_decay), (layer1_bias_decay + bias_decay), penalty_type));
        if(w2 && (!fast_exact_is_equal(layer2_weight_decay + weight_decay, 0) ||
                  !fast_exact_is_equal(layer2_bias_decay   + bias_decay,   0)))
            penalties.append(affine_transform_weight_penalty(w2, (layer2_weight_decay + weight_decay), (layer2_bias_decay + bias_decay), penalty_type));
        if(wout && (!fast_exact_is_equal(output_layer_weight_decay + weight_decay, 0) ||
                    !fast_exact_is_equal(output_layer_bias_decay   + bias_decay,   0)))
            penalties.append(affine_transform_weight_penalty(wout, (output_layer_weight_decay + weight_decay),
                                                             (output_layer_bias_decay + bias_decay), penalty_type));
        if(wdirect && !fast_exact_is_equal(direct_in_to_out_weight_decay + weight_decay, 0))
        {
            if (penalty_type == "L1_square")
                penalties.append(square(sumabs(wdirect))*(direct_in_to_out_weight_decay + weight_decay));
            else if (penalty_type == "L1")
                penalties.append(sumabs(wdirect)*(direct_in_to_out_weight_decay + weight_decay));
            else if (penalty_type == "L2_square")
                penalties.append(sumsquare(wdirect)*(direct_in_to_out_weight_decay + weight_decay));
        }

        test_costs = hconcat(costs);

        // apply penalty to cost
        if(penalties.size() != 0) {
            // only multiply by sampleweight if there are weights
            if (weightsize_>0)
                training_cost = hconcat(sampleweight*sum(hconcat(costs[0] & penalties))
                                        & (test_costs*sampleweight));
            else {
                training_cost = hconcat(sum(hconcat(costs[0] & penalties)) & test_costs);
            }
        }
        else {
            // only multiply by sampleweight if there are weights
            if(weightsize_>0) {
                training_cost = test_costs*sampleweight;
            } else {
                training_cost = test_costs;
            }
        }

        training_cost->setName("training_cost");
        test_costs->setName("test_costs");
        output->setName("output");

        // Shared values hack...
        bool use_paramsvalues=(bool)paramsvalues && (paramsvalues.size() == params.nelems());
        if(use_paramsvalues)
        {
            params << paramsvalues;
            initialize_mu(mu->value);
        }
        else
        {
            paramsvalues.resize(params.nelems());
            initializeParams();
        }
        params.makeSharedValue(paramsvalues);

        VarArray output_and_target = output & target;
        output_and_target_values.resize(output.length()+target.length());
        output_and_target.makeSharedValue(output_and_target_values);

        cdf_f = Func(output_and_target,cumulative);
        mean_f = Func(output,expected_value);
        density_f = Func(output_and_target,density);

        // Funcs
        VarArray outvars;
        VarArray testinvars;
        invars.resize(0);
        if(input)
        {
            invars.push_back(input);
            testinvars.push_back(input);
        }
        if(expected_value)
        {
            outvars.push_back(expected_value);
        }
        if(target)
        {
            invars.push_back(target);
            testinvars.push_back(target);
            outvars.push_back(target);
        }
        if(sampleweight)
        {
            invars.push_back(sampleweight);
        }

        VarArray outputs_array;

        for (unsigned int k=0;k<outputs_def.length();k++)
        {
            if (outputs_def[k]=='e')
                outputs_array &= expected_value;
            else if (outputs_def[k]=='t')
            {
                Func survival_f(target&output,var(1.0)-cumulative);
                Var threshold_y(1,1);
                threshold_y->valuedata[0]=thresholdY;
                outputs_array &= survival_f(threshold_y & output);
            }
            else if (outputs_def[k]=='S' || outputs_def[k]=='C' ||
                     outputs_def[k]=='L' || outputs_def[k]=='D')
            {
                Func prob_f(target&output,outputs_def[k]=='S'?(var(1.0)-cumulative):
                            (outputs_def[k]=='C'?cumulative:
                             (outputs_def[k]=='D'?density:log(density))));
                y_values.resize(n_curve_points);
                if (curve_positions=="uniform")
                {
                    real delta = maxY/(n_curve_points-1);
                    for (int j=0;j<n_curve_points;j++)
                    {
                        y_values[j] = var(j*delta);
                        y_values[j]->setName("y"+tostring(j));
                        outputs_array &= prob_f(y_values[j] & output);
                    }
                } else // log-scale
                {
                    real denom = 1.0/(1-exp(-scale));
                    for (int j=0;j<n_curve_points;j++)
                    {
                        y_values[j] = var((exp(scale*(j-n_output_density_terms)/n_output_density_terms)-exp(-scale))*denom);
                        y_values[j]->setName("y"+tostring(j));
                        outputs_array &= prob_f(y_values[j] & output);
                    }
                }
            } else
                outputs_array &= expected_value;
//          PLERROR("ConditionalDensityNet::build: can't handle outputs_def with option value = %c",outputs_def[k]);
        }
        outputs = hconcat(outputs_array);
        if (mu_is_fixed)
            f = Func(input, params&mu, outputs);
        else
            f = Func(input, params, outputs);
        f->recomputeParents();

        in2distr_f = Func(input,pos_a);
        in2distr_f->recomputeParents();

        if (mu_is_fixed)
            test_costf = Func(testinvars, params&mu, outputs&test_costs);
        else
            test_costf = Func(testinvars, params, outputs&test_costs);

        if (use_paramsvalues)
            test_costf->recomputeParents();
    }
    // PDistribution::finishConditionalBuild();
}


/* TODO Remove (?)
   void ConditionalDensityNet::computeOutput(const Vec& inputv, Vec& outputv) const
   {
   f->fprop(inputv,outputv);
   }
*/

/* TODO Remove (?)
   void ConditionalDensityNet::computeOutputAndCosts(const Vec& inputv, const Vec& targetv,
   Vec& outputv, Vec& costsv) const
   {
   test_costf->fprop(inputv&targetv, outputv&costsv);
   }
*/

TVec<string> ConditionalDensityNet::getTrainCostNames() const
{
    if (penalties.size() > 0)
    {
        TVec<string> cost_funcs(4);
        cost_funcs[0]="training_criterion+penalty";
        cost_funcs[1]="training_criterion";
        cost_funcs[2]="NLL";
        cost_funcs[3]="mse";
        return cost_funcs;
    }
    else return getTestCostNames();
}

/*
  TVec<string> ConditionalDensityNet::getTestCostNames() const
  {
  TVec<string> cost_funcs(3);
  cost_funcs[0]="training_criterion";
  cost_funcs[1]="NLL";
  cost_funcs[2]="mse";
  return cost_funcs;
  }
*/

// ### Nothing to add here, simply calls build_
void ConditionalDensityNet::build()
{
    inherited::build();
    build_();
}

#ifdef __INTEL_COMPILER
#pragma warning(disable:1419)  // Get rid of compiler warning.
#endif
extern void varDeepCopyField(Var& field, CopiesMap& copies);
#ifdef __INTEL_COMPILER
#pragma warning(default:1419)
#endif


void ConditionalDensityNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    varDeepCopyField(input, copies);
    varDeepCopyField(target, copies);
    varDeepCopyField(sampleweight, copies);
    varDeepCopyField(w1, copies);
    varDeepCopyField(w2, copies);
    varDeepCopyField(wout, copies);
    varDeepCopyField(wdirect, copies);
    varDeepCopyField(output, copies);
    varDeepCopyField(outputs, copies);
    varDeepCopyField(a, copies);
    varDeepCopyField(pos_a, copies);
    varDeepCopyField(b, copies);
    varDeepCopyField(pos_b, copies);
    varDeepCopyField(c, copies);
    varDeepCopyField(pos_c, copies);
    varDeepCopyField(density, copies);
    varDeepCopyField(cumulative, copies);
    varDeepCopyField(expected_value, copies);
    deepCopyField(costs, copies);
    deepCopyField(penalties, copies);
    varDeepCopyField(training_cost, copies);
    varDeepCopyField(test_costs, copies);
    deepCopyField(invars, copies);
    deepCopyField(params, copies);
    deepCopyField(paramsvalues, copies);
    varDeepCopyField(centers, copies);
    varDeepCopyField(centers_M, copies);
    varDeepCopyField(steps, copies);
    varDeepCopyField(steps_M, copies);
    varDeepCopyField(steps_0, copies);
    varDeepCopyField(steps_gradient, copies);
    varDeepCopyField(steps_integral, copies);
    varDeepCopyField(delta_steps, copies);
    varDeepCopyField(cum_numerator, copies);
    varDeepCopyField(cum_denominator, copies);
    deepCopyField(unconditional_cdf, copies);
    varDeepCopyField(unconditional_delta_cdf, copies);
    varDeepCopyField(initial_hardnesses, copies);
    varDeepCopyField(prev_centers, copies);
    varDeepCopyField(prev_centers_M, copies);
    varDeepCopyField(scaled_prev_centers, copies);
    varDeepCopyField(scaled_prev_centers_M, copies);
    varDeepCopyField(minus_prev_centers_0, copies);
    varDeepCopyField(minus_scaled_prev_centers_0, copies);
    deepCopyField(y_values, copies);
    varDeepCopyField(mu, copies);
    deepCopyField(f, copies);
    deepCopyField(test_costf, copies);
    deepCopyField(output_and_target_to_cost, copies);
    deepCopyField(cdf_f, copies);
    deepCopyField(mean_f, copies);
    deepCopyField(density_f, copies);
    deepCopyField(in2distr_f, copies);
    deepCopyField(output_and_target, copies);
    deepCopyField(output_and_target_values, copies);
    varDeepCopyField(totalcost, copies);
    varDeepCopyField(mass_cost, copies);
    varDeepCopyField(pos_y_cost, copies);
    deepCopyField(optimizer, copies);
}

void ConditionalDensityNet::setInput(const Vec& in) const
{
#ifdef BOUNDCHECK
    if (!f)
        PLERROR("ConditionalDensityNet::setInput: build was not completed (maybe because training set was not provided)!");
#endif
    in2distr_f->fprop(in,pos_a->value);
}

real ConditionalDensityNet::log_density(const Vec& y) const
{
    static Vec d;
    d.resize(1);
    target->value << y;
    density_f->fprop(output_and_target_values, d);
    return pl_log(d[0]);
}

real ConditionalDensityNet::survival_fn(const Vec& y) const
{
    return 1 - cdf(y);
}

// must be called after setInput
real ConditionalDensityNet::cdf(const Vec& y) const
{
    Vec cum(1);
    target->value << y;
    cdf_f->fprop(output_and_target_values,cum);
#ifdef BOUNDCHECK
    if (cum[0] < -1e-3)
        PLERROR("In ConditionalDensityNet::cdf - The cdf is < 0");
#endif
    return cum[0];
}

void ConditionalDensityNet::expectation(Vec& mu) const
{
    mu.resize(n_target);
    mean_f->fprop(output->value,mu);
}

void ConditionalDensityNet::variance(Mat& covar) const
{
    PLERROR("variance not implemented for ConditionalDensityNet");
}

void ConditionalDensityNet::resetGenerator(long g_seed)
{
    manual_seed(g_seed);
}

void ConditionalDensityNet::generate(Vec& y) const
{
    real u = uniform_sample();
    y.resize(1);
    if (u<pos_a->value[0]) // mass point
    {
        y[0]=0;
        return;
    }
    // then find y s.t. P(Y<y|x) = u by binary search
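    // Each iteration halves [y0,y2], so about ceil(log2(1/generate_precision))
    // iterations are needed (~10 for the default generate_precision=1e-3).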
    real y0=0;
    real y2=maxY;
    real delta;
    real p;
    do
    {
        delta = y2 - y0;
        y[0] = y0 + delta*0.5;
        p = cdf(y);
        if (p<u)
            // increase y
            y0 = y[0];
        else
            // decrease y
            y2 = y[0];
    }
    while (delta > generate_precision * maxY);
}


// Default version of inputsize returns learner->inputsize()
// If this is not appropriate, you should uncomment this and define
// it properly in the .cc
// int ConditionalDensityNet::inputsize() const {}

void ConditionalDensityNet::initializeParams()
{
    if (seed_>=0)
        manual_seed(seed_);
    else
        PLearn::seed();

    //real delta = 1./sqrt(inputsize());
    real delta = 1.0 / n_input;
    /*
      if(direct_in_to_out)
      {
      //fill_random_uniform(wdirect->value, -delta, +delta);
      fill_random_normal(wdirect->value, 0, delta);
      //wdirect->matValue(0).clear();
      }
    */
    if(nhidden>0)
    {
        //fill_random_uniform(w1->value, -delta, +delta);
        //delta = 1./sqrt(nhidden);
        fill_random_normal(w1->value, 0, delta);
        if(direct_in_to_out)
        {
            //fill_random_uniform(wdirect->value, -delta, +delta);
            fill_random_normal(wdirect->value, 0, 0.01*delta);
            wdirect->matValue(0).clear();
        }
        delta = 1.0/nhidden;
        w1->matValue(0).clear();
    }
    if(nhidden2>0)
    {
        //fill_random_uniform(w2->value, -delta, +delta);
        //delta = 1./sqrt(nhidden2);
        delta = 0.1/nhidden2;
        fill_random_normal(w2->value, 0, delta);
        w2->matValue(0).clear();
    }
    //fill_random_uniform(wout->value, -delta, +delta);
    fill_random_normal(wout->value, 0, delta);
    // Mat a_weights = wout->matValue.column(0); // Does not seem to be used anymore.
    // a_weights *= 3.0; // to get more dynamic range

    if (centers_initialization!="data")
    {
        Vec output_biases = wout->matValue(0);
        Vec mu_;
        int i=0;
        Vec a_ = output_biases.subVec(i++,1);
        Vec b_ = output_biases.subVec(i,n_output_density_terms); i+=n_output_density_terms;
        Vec c_ = output_biases.subVec(i,n_output_density_terms); i+=n_output_density_terms;
        if (mu_is_fixed)
            mu_ = mu->value;
        else
            mu_ = output_biases.subVec(i,n_output_density_terms); i+=n_output_density_terms;
        initialize_mu(mu_);
        b_.fill(inverse_softplus(1.0));
        c_.fill(inverse_softplus(1.0));
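        // inverse_softplus(1.0) = log(e-1) ~= 0.5413, so softplus(b_i) and
        // softplus(c_i) both start at 1, the regime in which the u_i reproduce
        // the unconditional cdf (see class help).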
        if (separate_mass_point)
            a_[0] = unconditional_p0>0?inverse_sigmoid(unconditional_p0):-50;
        else a_[0] = -50;
        unconditional_delta_cdf->value.fill((1.0-unconditional_p0)/n_output_density_terms);
    }

    // Reset optimizer
    if(optimizer)
        optimizer->reset();
}

void ConditionalDensityNet::initialize_mu(Vec& mu_)
{
    if (centers_initialization=="uniform")
    {
        real delta=maxY/n_output_density_terms;
        real center=delta;
        for (int i=0;i<n_output_density_terms;i++,center+=delta)
            mu_[i]=center;
    } else if (centers_initialization=="log-scale")
    {
        real denom = 1.0/(1-exp(-scale));
        for (int i=0;i<n_output_density_terms;i++)
            mu_[i]=(exp(scale*(i+1-n_output_density_terms)/n_output_density_terms)-exp(-scale))*denom;
    } else if (centers_initialization!="data")
        PLERROR("ConditionalDensityNet::initialize_mu: unknown value %s for centers_initialization option",
                centers_initialization.c_str());
}
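
// Illustrative check of the rules above: with maxY=1 and n_output_density_terms=4,
// "uniform" yields mu = (0.25, 0.5, 0.75, 1.0), while "log-scale" spaces the
// centers geometrically, placing more resolution near 0.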

void ConditionalDensityNet::forget()
{
    if (train_set) initializeParams();
    stage = 0;
}

void ConditionalDensityNet::train()
{
    int i=0, j=0;
    if(!train_set)
        PLERROR("In ConditionalDensityNet::train, you did not setTrainingSet");

    if(!train_stats)
        PLERROR("In ConditionalDensityNet::train, you did not setTrainStatsCollector");

    /*
    if (!already_sorted || n_margin > 0)
        PLERROR("In ConditionalDensityNet::train - Currently, can only be trained if the data is given as input, target");
        */

    if(f.isNull()) // Net has not been properly built yet (because build was called before the learner had a proper training set)
        build();

    int l = train_set->length();
    int nsamples = batch_size>0 ? batch_size : l;
    Func paramf = Func(invars, training_cost); // parameterized function to optimize
    Var totalcost = meanOf(train_set, paramf, nsamples);
    if(optimizer)
    {
        optimizer->setToOptimize(params, totalcost);
        optimizer->build();
    }

    // number of optimizer stages corresponding to one learner stage (one epoch)
    int optstage_per_lstage = l/nsamples;

    ProgressBar* pb = 0;
    if(report_progress)
        pb = new ProgressBar("Training ConditionalDensityNet from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);

    // estimate the unconditional cdf
    static real weight;

    if (stage==0)
    {
        Vec mu_values = mu->value;
        unconditional_cdf.clear();
        real sum_w=0;
        unconditional_p0 = 0;
        static StatsCollector sc;
        bool init_mu_from_data=centers_initialization=="data";
        if (init_mu_from_data)
        {
            sc.maxnvalues = min(l,100*n_output_density_terms);
            sc.build();
            sc.forget();
        }
        Vec tmp1(inputsize());
        Vec tmp2(targetsize());
        for (i=0;i<l;i++)
        {
            train_set->getExample(i, tmp1, tmp2, weight);
            input->value << tmp1.subVec(0, n_input);
            target->value << tmp1.subVec(n_input, n_target);
            real y = target->valuedata[0];
            if (y < 0)
                PLERROR("In ConditionalDensityNet::train - Found a negative target");
            if (y > maxY)
                PLERROR("In ConditionalDensityNet::train - Found a target > maxY");
            if (fast_exact_is_equal(y, 0))
                unconditional_p0 += weight;
            if (init_mu_from_data)
                sc.update(y,weight);
            else
                for (int k=0;k<n_output_density_terms;k++)
                    if (y<=mu_values[k])
                        unconditional_cdf[k] += weight;
            sum_w += weight;
        }
        static Mat cdf;
        unconditional_p0 *= 1.0/sum_w;
        if (init_mu_from_data)
        {
            cdf = sc.cdf();
            int k=3;
            real mean_y = sc.mean();

            real current_mean_fraction = 0;
            real prev_cdf = unconditional_p0;
            real prev_y = 0;
            for (int q=0;q<n_output_density_terms;q++)
            {
                real target_fraction = mean_y*(q+1.0)/n_output_density_terms;
                for (;k<cdf.length() && current_mean_fraction < target_fraction;k++)
                {
                    current_mean_fraction += (cdf(k,0)+prev_y)*0.5*(cdf(k,1)-prev_cdf);
                    prev_cdf = cdf(k,1);
                    prev_y = cdf(k,0);
                }
                if (q==n_output_density_terms-1)
                {
                    mu_values[q]=maxY;
                    unconditional_cdf[q]=1.0;
                }
                else
                {
                    mu_values[q]=cdf(k,0);
                    unconditional_cdf[q]=cdf(k,1);
                }
            }
        }
        else
            for (j=0;j<n_output_density_terms;j++)
                unconditional_cdf[j] *= 1.0/sum_w;

        unconditional_delta_cdf->valuedata[0]=unconditional_cdf[0]-unconditional_p0;
        for (i=1;i<n_output_density_terms;i++)
            unconditional_delta_cdf->valuedata[i]=unconditional_cdf[i]-unconditional_cdf[i-1];
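        // unconditional_delta_cdf[i] is the u_i of the class help:
        // u_i = P(mu_{i-1} < Y <= mu_i) estimated on the training set, with
        // mu_0 = 0 and the mass point at 0 excluded from u_1.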

        // initialize biases based on unconditional distribution
        Vec output_biases = wout->matValue(0);
        i=0;
        Vec a_ = output_biases.subVec(i++,1);
        Vec b_ = output_biases.subVec(i,n_output_density_terms); i+=n_output_density_terms;
        Vec c_ = output_biases.subVec(i,n_output_density_terms); i+=n_output_density_terms;
        Vec mu_;
        Vec s_c(n_output_density_terms);
        if (mu_is_fixed)
            mu_ = mu->value;
        else
            mu_ = output_biases.subVec(i,n_output_density_terms); i+=n_output_density_terms;
        b_.fill(inverse_softplus(1.0));
        initialize_mu(mu_);
        for (i=0;i<n_output_density_terms;i++)
        {
            real prev_mu = i==0?0:mu_[i-1];
            real delta = mu_[i]-prev_mu;
            s_c[i] = delta>0?initial_hardness/delta:-50;
            c_[i] = inverse_softplus(1.0);
        }

        if (centers_initialization!="data")
            unconditional_delta_cdf->value.fill(1.0/n_output_density_terms);
        real *dcdf = unconditional_delta_cdf->valuedata;
        if (separate_mass_point)
            a_[0] = unconditional_p0>0?inverse_sigmoid(unconditional_p0):-50;
        else if (fast_exact_is_equal(dcdf[0], 0))
            a_[0]=unconditional_p0>0?inverse_softplus(unconditional_p0):-50;
        else
        {
            real s=0;
            if (steps_type=="sigmoid_steps")
                for (i=0;i<n_output_density_terms;i++)
                    s+=dcdf[i]*(unconditional_p0*sigmoid(s_c[i]*(maxY-mu_[i]))-sigmoid(-s_c[i]*mu_[i]));
            else
                for (i=0;i<n_output_density_terms;i++)
                {
                    real prev_mu = i==0?0:mu_[i-1];
                    real ss1 = soft_slope(maxY,s_c[i],prev_mu,mu_[i]);
                    real ss2 = soft_slope(0,s_c[i],prev_mu,mu_[i]);
                    s+=dcdf[i]*(unconditional_p0*ss1 - ss2);
                }
            real sa=s/(1-unconditional_p0);
            a_[0]=sa>0?inverse_softplus(sa):-50;

            /*
              Mat At(n_output_density_terms,n_output_density_terms); // transpose of the linear system matrix
              Mat rhs(1,n_output_density_terms); // right hand side of the linear system
              // solve the system to find b's that make the unconditional fit the observed data
              //  sum_j sb_j dcdf_j (cdf_j step_j(maxY) - step_j(mu_i)) = sa (1 - cdf_i)
              //
              for (int i=0;i<n_output_density_terms;i++)
              {
              real* Ati = At[i];
              real prev_mu = i==0?0:mu_[i-1];
              for (int j=0;j<n_output_density_terms;j++)
              {
              if (steps_type=="sigmoid_steps")
              Ati[j] = dcdf[i]*(unconditional_cdf[j]*sigmoid(initial_hardness*(maxY-mu_[i]))-
              sigmoid(initial_hardness*(mu_[j]-mu_[i])));
              else
              Ati[j] = dcdf[i]*(unconditional_cdf[j]*soft_slope(maxY,initial_hardness,prev_mu,mu_[i])-
              soft_slope(mu_[j],initial_hardness,prev_mu,mu_[i]));
              }
              rhs[0][i] = sa*(1-unconditional_cdf[i]);
              }
              TVec<int> pivots(n_output_density_terms);
              int status = lapackSolveLinearSystem(At,rhs,pivots);
              if (status==0)
              for (int i=0;i<n_output_density_terms;i++)
              b_[i] = inverse_softplus(rhs[0][i]);
              else
              PLWARNING("ConditionalDensityNet::initializeParams() Could not invert matrix to obtain exact init. of b");
            */
        }
        test_costf->recomputeParents();

        // debugging
        static bool display_graph = false;
        if (display_graph) f->fprop(input->value,outputs->value);
        //displayVarGraph(outputs,true);
        if (display_graph)
            displayFunction(f,true);
        if (display_graph)
            displayFunction(test_costf,true);
    }
    int initial_stage = stage;
    bool early_stop=false;
    while(stage<nstages && !early_stop)
    {
        optimizer->nstages = optstage_per_lstage;
        train_stats->forget();
        optimizer->early_stop = false;
        early_stop = optimizer->optimizeN(*train_stats);

        //if (verify_gradient)
        //  training_cost->verifyGradient(verify_gradient);
        //if (stage==nstages-1 && verify_gradient)
        static bool verify_gradient = false;
        if (verify_gradient)
        {
            if (batch_size == 0)
            {
                cout << "OPTIMIZER" << endl;
                optimizer->verifyGradient(0.001);
            }
        }
        static bool display_graph = false;
        if (display_graph)
            displayFunction(f,true);
        if (display_graph)
            displayFunction(test_costf,true);

        train_stats->finalize();
        if(verbosity>2)
            cerr << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
        ++stage;
        if(pb)
            pb->update(stage-initial_stage);
    }
    if(verbosity>1)
        cerr << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;

    if(pb)
        delete pb;

    test_costf->recomputeParents();
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :