// -*- C++ -*-

// LinearInductiveTransferClassifier.cc
//
// Copyright (C) 2006 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id: .pyskeleton_header 544 2003-09-01 00:05:31Z plearner $
   ******************************************************* */

// Authors: Hugo Larochelle
#include "LinearInductiveTransferClassifier.h"
#include <plearn/var/AffineTransformVariable.h>
#include <plearn/var/ArgmaxVariable.h>
#include <plearn/var/SourceVariable.h>
#include <plearn/var/AffineTransformWeightPenalty.h>
#include <plearn/var/ClassificationLossVariable.h>
#include <plearn/var/ConcatColumnsVariable.h>
#include <plearn/var/ColumnSumVariable.h>
#include <plearn/var/ConcatRowsVariable.h>
#include <plearn/var/CrossEntropyVariable.h>
#include <plearn/var/DotProductVariable.h>
#include <plearn/var/DuplicateRowVariable.h>
#include <plearn/var/DivVariable.h>
#include <plearn/var/ExpVariable.h>
//#include <plearn/var/LogSoftmaxVariable.h>
#include <plearn/var/LiftOutputVariable.h>
#include <plearn/var/MulticlassLossVariable.h>
#include <plearn/var/NegCrossEntropySigmoidVariable.h>
#include <plearn/var/OneHotVariable.h>
//#include <plearn/var/PowVariable.h>
#include <plearn/var/ProductTransposeVariable.h>
#include <plearn/var/ProductVariable.h>
#include <plearn/var/ReshapeVariable.h>
#include <plearn/var/SigmoidVariable.h>
#include <plearn/var/SoftmaxVariable.h>
#include <plearn/var/SumVariable.h>
#include <plearn/var/SumAbsVariable.h>
#include <plearn/var/SumOfVariable.h>
#include <plearn/var/SumSquareVariable.h>
#include <plearn/var/TanhVariable.h>
#include <plearn/var/TimesVariable.h>
#include <plearn/var/TransposeVariable.h>
#include <plearn/var/TransposeProductVariable.h>
#include <plearn/var/VarRowsVariable.h>
#include <plearn/var/Var_operators.h>
#include <plearn/var/Var_utils.h>
#include <plearn/display/DisplayUtils.h>
#include <plearn/vmat/ConcatColumnsVMatrix.h>
#include <plearn/math/random.h>
#include <plearn/math/plapack.h>
#include <plearn_learners/online/RBMMatrixConnection.h>
namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    LinearInductiveTransferClassifier,
    "Linear classifier that uses class representations",
    "Linear classifier that uses class representations in\n"
    "order to make use of inductive transfer between classes.");
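// Overview: instead of learning one independent weight vector per class,
// this learner predicts the weight vector of class c as a linear function
// A * r_c of a user-provided class representation r_c (option "class_reps").
// Since A is shared across classes, a classifier can be obtained even for
// classes that are rare or absent in the training set (inductive transfer).
//
// A minimal, hypothetical sketch of how such a learner might be instantiated
// in a PLearn script (option names are the ones declared in declareOptions()
// below; the optimizer spec and the matrix literal syntax are assumptions,
// not taken from this file):
//
//   LinearInductiveTransferClassifier(
//       model_type = "discriminative" ;
//       class_reps = <noutputs x representation_size matrix> ;
//       weight_decay = 1e-4 ;
//       optimizer = GradientOptimizer( start_learning_rate = 0.01 ) ;
//       nstages = 50 ;
//   )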

LinearInductiveTransferClassifier::LinearInductiveTransferClassifier()
    : batch_size(1),
      weight_decay(0),
      penalty_type("L2_square"),
      initialization_method("uniform_linear"),
      model_type("discriminative"),
      dont_consider_train_targets(false),
      use_bias_in_weights_prediction(false),
      multi_target_classifier(false),
      sigma_min(1e-5),
      nhidden(-1),
      rbm_nstages(0),
      rbm_learning_rate(0.01)
{
    random_gen = new PRandom();
}

void LinearInductiveTransferClassifier::declareOptions(OptionList& ol)
{
    declareOption(ol, "optimizer", &LinearInductiveTransferClassifier::optimizer,
                  OptionBase::buildoption,
                  "Optimizer of the discriminative classifier");
    declareOption(ol, "rbm_nstages",
                  &LinearInductiveTransferClassifier::rbm_nstages,
                  OptionBase::buildoption,
                  "Number of RBM training epochs used to initialize the hidden layer weights");
    declareOption(ol, "rbm_learning_rate",
                  &LinearInductiveTransferClassifier::rbm_learning_rate,
                  OptionBase::buildoption,
                  "Learning rate for the RBM");
    declareOption(ol, "visible_layer",
                  &LinearInductiveTransferClassifier::visible_layer,
                  OptionBase::buildoption,
                  "Visible layer of the RBM");
    declareOption(ol, "hidden_layer",
                  &LinearInductiveTransferClassifier::hidden_layer,
                  OptionBase::buildoption,
                  "Hidden layer of the RBM");
    declareOption(ol, "batch_size", &LinearInductiveTransferClassifier::batch_size,
                  OptionBase::buildoption,
                  "How many samples to use to estimate the average gradient before updating the weights\n"
                  "0 is equivalent to specifying training_set->length() \n");
    declareOption(ol, "weight_decay",
                  &LinearInductiveTransferClassifier::weight_decay,
                  OptionBase::buildoption,
                  "Global weight decay for all layers\n");
    declareOption(ol, "model_type", &LinearInductiveTransferClassifier::model_type,
                  OptionBase::buildoption,
                  "Model type. Choose between:\n"
                  " - \"discriminative\"               (multiclass classifier)\n"
                  " - \"discriminative_1_vs_all\"      (1 vs all multitask classifier)\n"
                  " - \"generative\"                   (gaussian input)\n"
                  " - \"generative_0-1\"               ([0,1] input)\n"
                  " - \"nnet_discriminative_1_vs_all\" ([0,1] input)\n"
        );
    declareOption(ol, "penalty_type",
                  &LinearInductiveTransferClassifier::penalty_type,
                  OptionBase::buildoption,
                  "Penalty to use on the weights (for weight and bias decay).\n"
                  "Can be any of:\n"
                  "  - \"L1\": L1 norm,\n"
                  "  - \"L1_square\": square of the L1 norm,\n"
                  "  - \"L2_square\" (default): square of the L2 norm.\n");
    declareOption(ol, "initialization_method",
                  &LinearInductiveTransferClassifier::initialization_method,
                  OptionBase::buildoption,
                  "The method used to initialize the weights:\n"
                  " - \"normal_linear\"  = a normal law with variance 1/n_inputs\n"
                  " - \"normal_sqrt\"    = a normal law with variance 1/sqrt(n_inputs)\n"
                  " - \"uniform_linear\" = a uniform law in [-1/n_inputs, 1/n_inputs]\n"
                  " - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(n_inputs), 1/sqrt(n_inputs)]\n"
                  " - \"zero\"           = all weights are set to 0\n");
    declareOption(ol, "paramsvalues",
                  &LinearInductiveTransferClassifier::paramsvalues,
                  OptionBase::learntoption,
                  "The learned parameters\n");
    declareOption(ol, "class_reps", &LinearInductiveTransferClassifier::class_reps,
                  OptionBase::buildoption,
                  "Class vector representations\n");
    declareOption(ol, "dont_consider_train_targets",
                  &LinearInductiveTransferClassifier::dont_consider_train_targets,
                  OptionBase::buildoption,
                  "Indication that the targets seen in the training set\n"
                  "should not be considered when tagging a new set\n");
    declareOption(ol, "use_bias_in_weights_prediction",
                  &LinearInductiveTransferClassifier::use_bias_in_weights_prediction,
                  OptionBase::buildoption,
                  "Indication that a bias should be used for weights prediction\n");
    declareOption(ol, "multi_target_classifier",
                  &LinearInductiveTransferClassifier::multi_target_classifier,
                  OptionBase::buildoption,
                  "Indication that the classifier works with multiple targets,\n"
                  "possibly ON simultaneously.\n");
    declareOption(ol, "sigma_min", &LinearInductiveTransferClassifier::sigma_min,
                  OptionBase::buildoption,
                  "Minimum variance for all coordinates, which is added\n"
                  "to the maximum likelihood estimates.\n");
    declareOption(ol, "nhidden", &LinearInductiveTransferClassifier::nhidden,
                  OptionBase::buildoption,
                  "Number of hidden units for the neural network.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void LinearInductiveTransferClassifier::build_()
{
    /*
     * Create Topology Var Graph
     */

    // Don't do anything if we don't have a train_set.
    // It's the only one that knows the inputsize and targetsize anyway...
    // Also, nothing is done if no layers need to be added.
    if(inputsize_>=0 && targetsize_>=0 && weightsize_>=0)
    {
        if (seed_ != 0) random_gen->manual_seed(seed_);

        input = Var(inputsize(), "input");
        target = Var(targetsize(),"target");
        if(class_reps.size()<=0)
            PLERROR("LinearInductiveTransferClassifier::build_(): class_reps is empty");
        noutputs = class_reps.length();
        buildTargetAndWeight();
        params.resize(0);

        Mat class_reps_to_use;
        if(use_bias_in_weights_prediction)
        {
            // Add a column of 1s, to include a bias in the weight prediction
            Mat class_reps_with_bias(class_reps.length(), class_reps.width()+1);
            for(int i=0; i<class_reps_with_bias.length(); i++)
                for(int j=0; j<class_reps_with_bias.width(); j++)
                {
                    if(j==0)
                        class_reps_with_bias(i,j) = 1;
                    else
                        class_reps_with_bias(i,j) = class_reps(i,j-1);
                }
            class_reps_to_use = class_reps_with_bias;
        }
        else
        {
            class_reps_to_use = class_reps;
        }

        if(model_type == "nnet_discriminative_1_vs_all")
        {
            if(nhidden <= 0)
                PLERROR("In LinearInductiveTransferClassifier::build_(): nhidden "
                        "must be > 0.");
//            Ws.resize(nhidden);
//            As.resize(nhidden);
//            s_hids.resize(nhidden);
//            s = Var(1,nhidden,"sigma_square");
//            for(int i=0; i<Ws.length(); i++)
//            {
//                Ws[i] = Var(inputsize_,class_reps_to_use.width());
//                As[i] = Var(1,class_reps_to_use.width());
//                s_hids[i] = Var(1,inputsize_);
//            }
            W = Var(inputsize_+1,nhidden,"hidden_weights");
            A = Var(nhidden,class_reps_to_use.width());
            s = Var(1,nhidden,"sigma_square");
            params.push_back(W);
            params.push_back(A);
            params.push_back(s);
//            params.append(Ws);
//            params.append(As);
//            params.append(s);
//            params.append(s_hids);
//            A = vconcat(As);
        }
        else
        {
            A = Var(inputsize_,class_reps_to_use.width());
            s = Var(1,inputsize_,"sigma_square");
            //fillWeights(A,false);
            params.push_back(A);
            params.push_back(s);
        }

        class_reps_var = new SourceVariable(class_reps_to_use);
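        // The following product gives one column per class: column c is the
        // predicted weight vector A * r_c for class c, where r_c is the
        // (possibly bias-augmented) representation stored in row c of
        // class_reps_var. Because A is shared, a weight vector can be
        // predicted even for classes absent from the training set. In the
        // discriminative models below, the vconcat prepends a row that acts
        // as the per-class bias of the affine transform.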
        Var weights = productTranspose(A,class_reps_var);
        if(model_type == "discriminative" || model_type == "discriminative_1_vs_all")
        {
            weights = vconcat(-product(exp(s),square(weights)) & weights); // Making sure that the scaling factor is going to be positive
            output = affine_transform(input, weights);
        }
        else if(model_type == "generative_0-1")
        {
            PLERROR("Not implemented yet");
            //weights = vconcat(columnSum(log(A/(exp(A)-1))) & weights);
            //output = affine_transform(input, weights);
        }
        else if(model_type == "generative")
        {
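            // Gaussian class-conditional model: column c of weights is the
            // class mean mu_c = A * r_c, and s holds one variance per input
            // coordinate, shared across classes. The affine transform below
            // computes, for each class c,
            //     -sum_i mu_ci^2/s_i + 2*sum_i x_i*mu_ci/s_i,
            // which is twice the class-dependent part of the Gaussian
            // log-likelihood log N(x; mu_c, diag(s)), so the argmax over
            // classes is the maximum-likelihood class.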
            weights = vconcat(-columnSum(square(weights)/transpose(duplicateRow(s,noutputs))) & 2*weights/transpose(duplicateRow(s,noutputs)));
            if(targetsize() == 1)
                output = affine_transform(input, weights);
            else
                output = exp(affine_transform(input, weights) - duplicateRow(dot(transpose(input)/s,input),noutputs))+REAL_EPSILON;
        }
        else if(model_type == "nnet_discriminative_1_vs_all")
        {
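            // Same class-weight construction as the linear discriminative
            // case, except that the predicted weights act on a hidden
            // representation squash(affine_transform(input, W)) instead of
            // the raw input (the first row of W is the hidden bias row). W
            // may be initialized by RBM training in train(); note that tanh
            // units are used when RBM pre-training is enabled and sigmoid
            // units otherwise.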
            //hidden_neurons.resize(nhidden);
            //Var weights;
            //for(int i=0; i<nhidden; i++)
            //{
            //    weights = productTranspose(Ws[i],class_reps_var);
            //    weights = vconcat(-product(exp(s_hids[i]),square(weights))
            //                      & weights);
            //    hidden_neurons[i] = tanh(affine_transform(input, weights));
            //}
            //
            //weights = productTranspose(A,class_reps_var);
            //output = -transpose(product(exp(s),square(weights)));
            //
            //for(int i=0; i<nhidden; i++)
            //{
            //    output = output + times(productTranspose(class_reps_var,As[i]),
            //                   hidden_neurons[i]);
            //}
            weights = vconcat(-product(exp(s),square(weights)) & weights); // Making sure that the scaling factor is going to be positive
            if(rbm_nstages>0)
                output = affine_transform(tanh(affine_transform(input,W)), weights);
            else
                output = affine_transform(sigmoid(affine_transform(input,W)), weights);
        }

        else
            PLERROR("In LinearInductiveTransferClassifier::build_(): model_type %s is not valid", model_type.c_str());

        TVec<bool> class_tags(noutputs);
        if(targetsize() == 1)
        {
            Vec row(train_set.width());
            int target_class;
            class_tags.fill(0);
            for(int i=0; i<train_set.length(); i++)
            {
                train_set->getRow(i,row);
                target_class = (int) row[train_set->inputsize()];
                class_tags[target_class] = 1;
            }

            seen_targets.resize(0);
            unseen_targets.resize(0);
            for(int i=0; i<class_tags.length(); i++)
                if(class_tags[i])
                    seen_targets.push_back(i);
                else
                    unseen_targets.push_back(i);
        }

        if(targetsize() != 1 && !multi_target_classifier)
            PLERROR("In LinearInductiveTransferClassifier::build_(): when targetsize() != 1, multi_target_classifier should be true.");
        if(targetsize() == 1 && multi_target_classifier)
            PLERROR("In LinearInductiveTransferClassifier::build_(): when targetsize() == 1, multi_target_classifier should be false.");

        if(targetsize() == 1 && seen_targets.length() != class_tags.length())
        {
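            // Some classes never appear in the training set: restrict the
            // supervised output to the rows (classes) actually seen, and
            // optionally the "new" output to the unseen ones. The mapping
            // variables re-index an original class id to its position within
            // the corresponding subset; MISSING_VALUE marks classes that do
            // not belong to that subset.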
            sup_output = new VarRowsVariable(output,new SourceVariable(seen_targets));
            if(dont_consider_train_targets)
                new_output = new VarRowsVariable(output,new SourceVariable(unseen_targets));
            else
                new_output = output;
            Var sup_mapping = new SourceVariable(noutputs,1);
            Var new_mapping = new SourceVariable(noutputs,1);
            int sup_id = 0;
            int new_id = 0;
            for(int k=0; k<class_tags.length(); k++)
            {
                if(class_tags[k])
                {
                    sup_mapping->value[k] = sup_id;
                    new_mapping->value[k] = MISSING_VALUE;
                    sup_id++;
                }
                else
                {
                    sup_mapping->value[k] = MISSING_VALUE;
                    new_mapping->value[k] = new_id;
                    new_id++;
                }
            }
            sup_target = new VarRowsVariable(sup_mapping, target);
            if(dont_consider_train_targets)
                new_target = new VarRowsVariable(new_mapping, target);
            else
                new_target = target;
        }
        else
        {
            sup_output = output;
            new_output = output;
            sup_target = target;
            new_target = target;
        }

        // Build costs
        if(model_type == "discriminative" || model_type == "discriminative_1_vs_all" || model_type == "generative_0-1" || model_type == "nnet_discriminative_1_vs_all")
        {
            if(model_type == "discriminative")
            {
                if(targetsize() != 1)
                    PLERROR("In LinearInductiveTransferClassifier::build_(): can't use discriminative model with targetsize() != 1");
                costs.resize(2);
                new_costs.resize(2);
                sup_output = softmax(sup_output);
                costs[0] = neg_log_pi(sup_output,sup_target);
                costs[1] = classification_loss(sup_output, sup_target);
                new_output = softmax(new_output);
                new_costs[0] = neg_log_pi(new_output,new_target);
                new_costs[1] = classification_loss(new_output, new_target);
            }
            if(model_type == "discriminative_1_vs_all"
               || model_type == "nnet_discriminative_1_vs_all")
            {
                costs.resize(2);
                new_costs.resize(2);
                if(targetsize() == 1)
                {
                    costs[0] = stable_cross_entropy(sup_output, onehot(seen_targets.length(),sup_target));
                    costs[1] = classification_loss(sigmoid(sup_output), sup_target);
                }
                else
                {
                    costs[0] = stable_cross_entropy(sup_output, sup_target, true);
                    costs[1] = transpose(lift_output(sigmoid(sup_output)+0.001, sup_target));
                }
                if(targetsize() == 1)
                {
                    if(dont_consider_train_targets)
                        new_costs[0] = stable_cross_entropy(new_output, onehot(unseen_targets.length(),new_target));
                    else
                        new_costs[0] = stable_cross_entropy(new_output, onehot(noutputs,new_target));
                    new_costs[1] = classification_loss(sigmoid(new_output), new_target);
                }
                else
                {
                    new_costs.resize(costs.length());
                    for(int i=0; i<new_costs.length(); i++)
                        new_costs[i] = costs[i];
                }
            }
            if(model_type == "generative_0-1")
            {
                costs.resize(2);
                new_costs.resize(2);
                if(targetsize() == 1)
                {
                    costs[0] = sup_output;
                    costs[1] = classification_loss(sigmoid(sup_output), sup_target);
                }
                else
                {
                    PLERROR("In LinearInductiveTransferClassifier::build_(): can't use generative_0-1 model with targetsize() != 1");
                    costs[0] = sup_output;
                    costs[1] = transpose(lift_output(sigmoid(exp(sup_output)+REAL_EPSILON), sup_target));
                }
                if(targetsize() == 1)
                {
                    new_costs[0] = new_output;
                    new_costs[1] = classification_loss(new_output, new_target);
                }
                else
                {
                    new_costs.resize(costs.length());
                    for(int i=0; i<new_costs.length(); i++)
                        new_costs[i] = costs[i];
                }
            }
        }
        else if(model_type == "generative")
        {
            costs.resize(1);
            if(targetsize() == 1)
                costs[0] = classification_loss(sup_output, sup_target);
            else
                costs[0] = transpose(lift_output(sigmoid(sup_output), sup_target));
            if(targetsize() == 1)
            {
                new_costs.resize(1);
                new_costs[0] = classification_loss(new_output, new_target);
            }
            else
            {
                new_costs.resize(costs.length());
                for(int i=0; i<new_costs.length(); i++)
                    new_costs[i] = costs[i];
            }
        }
        else PLERROR("LinearInductiveTransferClassifier::build_(): model_type \"%s\" invalid",model_type.c_str());

        string pt = lowerstring( penalty_type );
        if( pt == "l1" )
            penalty_type = "L1";
        else if( pt == "l1_square" || pt == "l1 square" || pt == "l1square" )
            penalty_type = "L1_square";
        else if( pt == "l2_square" || pt == "l2 square" || pt == "l2square" )
            penalty_type = "L2_square";
        else if( pt == "l2" )
        {
            PLWARNING("L2 penalty not supported, assuming you want L2 square");
            penalty_type = "L2_square";
        }
        else
            PLERROR("penalty_type \"%s\" not supported", penalty_type.c_str());

        buildPenalties();
        Var train_costs = hconcat(costs);
        test_costs = hconcat(new_costs);

        // Apply penalty to cost.
        // If there is no penalty, we still add costs[0] as the first cost, in
        // order to keep the same number of costs as if there was a penalty.
        if(penalties.size() != 0) {
            if (weightsize_>0)
                training_cost = hconcat(sampleweight*sum(hconcat(costs[0] & penalties))
                                        & (train_costs*sampleweight));
            else
                training_cost = hconcat(sum(hconcat(costs[0] & penalties)) & train_costs);
        }
        else {
            if(weightsize_>0) {
                training_cost = hconcat(costs[0]*sampleweight & train_costs*sampleweight);
            } else {
                training_cost = hconcat(costs[0] & train_costs);
            }
        }

        training_cost->setName("training_cost");
        test_costs->setName("test_costs");

        if((bool)paramsvalues && (paramsvalues.size() == params.nelems()))
            params << paramsvalues;
        else
            paramsvalues.resize(params.nelems());
        params.makeSharedValue(paramsvalues);

        // Build functions.
        buildFuncs(input, output, target, sampleweight);

        // Reinitialize the optimization phase
        if(optimizer)
            optimizer->reset();
        stage = 0;
    }
}


// ### Nothing to add here, simply calls build_
void LinearInductiveTransferClassifier::build()
{
    inherited::build();
    build_();
}


void LinearInductiveTransferClassifier::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(class_reps, copies);
    deepCopyField(optimizer, copies);
    deepCopyField(visible_layer, copies);
    deepCopyField(hidden_layer, copies);

    varDeepCopyField(input, copies);
    varDeepCopyField(output, copies);
    varDeepCopyField(sup_output, copies);
    varDeepCopyField(new_output, copies);
    varDeepCopyField(target, copies);
    varDeepCopyField(sup_target, copies);
    varDeepCopyField(new_target, copies);
    varDeepCopyField(sampleweight, copies);
    varDeepCopyField(A, copies);
    varDeepCopyField(s, copies);
    varDeepCopyField(class_reps_var, copies);

    deepCopyField(costs, copies);
    deepCopyField(new_costs, copies);
    deepCopyField(params, copies);
    deepCopyField(paramsvalues, copies);
    deepCopyField(penalties, copies);

    varDeepCopyField(training_cost, copies);
    varDeepCopyField(test_costs, copies);

    deepCopyField(invars, copies);
    deepCopyField(seen_targets, copies);
    deepCopyField(unseen_targets, copies);

    deepCopyField(f, copies);
    deepCopyField(test_costf, copies);
    deepCopyField(output_and_target_to_cost, copies);
    deepCopyField(sup_test_costf, copies);
    deepCopyField(sup_output_and_target_to_cost, copies);

    varDeepCopyField(W, copies);
    //deepCopyField(As, copies);
    //deepCopyField(Ws, copies);
    //deepCopyField(s_hids, copies);
    //deepCopyField(hidden_neurons, copies);

    //PLERROR("LinearInductiveTransferClassifier::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}


int LinearInductiveTransferClassifier::outputsize() const
{
    if(output)
        return output->size();
    else
        return 0;
}

void LinearInductiveTransferClassifier::forget()
{
    if(optimizer)
        optimizer->reset();
    stage = 0;

    if(model_type == "nnet_discriminative_1_vs_all")
    {
//        for(int i=0; i<Ws.length(); i++)
//        {
//            fillWeights(Ws[i],false,1./(inputsize_*class_reps.width()));
//            fillWeights(As[i],false,1./(nhidden*class_reps.width()));
//            s_hids[i]->value.fill(1);
//        }
        fillWeights(W,true);
        fillWeights(A,false,1./(nhidden*class_reps.width()));
        s->value.fill(1);
    }
    else
    {
        //A = Var(inputsize_,class_reps_to_use.width());
        A->value.fill(0);
        s->value.fill(1);
    }

    // Might need to recompute the propagation paths (if the number of task
    // representations changed, for instance)
    build();
}

void LinearInductiveTransferClassifier::train()
{
    if(!train_set)
        PLERROR("In LinearInductiveTransferClassifier::train, you did not setTrainingSet");

    if(!train_stats)
        PLERROR("In LinearInductiveTransferClassifier::train, you did not setTrainStatsCollector");

    int l = train_set->length();

    if(f.isNull()) // Net has not been properly built yet (because build was called before the learner had a proper training set)
        build();

    if(rbm_nstages>0 && stage == 0 && nstages > 0 && model_type == "nnet_discriminative_1_vs_all")
    {
        if(!visible_layer)
            PLERROR("In LinearInductiveTransferClassifier::train(): "
                    "visible_layer must be provided.");
        if(!hidden_layer)
            PLERROR("In LinearInductiveTransferClassifier::train(): "
                    "hidden_layer must be provided.");

        Vec input, target;
        real example_weight;
        real recons = 0;
        RBMMatrixConnection* c = new RBMMatrixConnection();
        PP<RBMMatrixConnection> layer_matrix_connections = c;
        PP<RBMConnection> layer_connections = c;
        hidden_layer->size = nhidden;
        visible_layer->size = inputsize_;
        layer_connections->up_size = inputsize_;
        layer_connections->down_size = nhidden;

        hidden_layer->random_gen = random_gen;
        visible_layer->random_gen = random_gen;
        layer_connections->random_gen = random_gen;

        visible_layer->setLearningRate(rbm_learning_rate);
        hidden_layer->setLearningRate(rbm_learning_rate);
        layer_connections->setLearningRate(rbm_learning_rate);

        hidden_layer->build();
        visible_layer->build();
        layer_connections->build();

        Vec pos_visible,pos_hidden,neg_visible,neg_hidden;
        pos_visible.resize(inputsize_);
        pos_hidden.resize(nhidden);
        neg_visible.resize(inputsize_);
        neg_hidden.resize(nhidden);

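        // Contrastive divergence (CD-1) training of the RBM: for each
        // example, the positive phase computes the hidden expectation given
        // the input and samples a hidden configuration; the negative phase
        // reconstructs a visible sample from it and recomputes the hidden
        // expectation; the layers and the connection are then updated from
        // the (positive, negative) statistics. The reconstruction NLL is
        // accumulated only for monitoring.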
        for(int i = 0; i < rbm_nstages; i++)
        {
            for(int j=0; j<train_set->length(); j++)
            {
                train_set->getExample(j,input,target,example_weight);

                pos_visible = input;
                layer_connections->setAsUpInput( input );
                hidden_layer->getAllActivations( layer_connections );
                hidden_layer->computeExpectation();
                hidden_layer->generateSample();
                pos_hidden << hidden_layer->expectation;

                layer_connections->setAsDownInput( hidden_layer->sample );
                visible_layer->getAllActivations( layer_connections );
                visible_layer->computeExpectation();
                visible_layer->generateSample();
                neg_visible = visible_layer->sample;

                layer_connections->setAsUpInput( visible_layer->sample );
                hidden_layer->getAllActivations( layer_connections );
                hidden_layer->computeExpectation();
                neg_hidden = hidden_layer->expectation;

                // Compute reconstruction error
                layer_connections->setAsDownInput( pos_hidden );
                visible_layer->getAllActivations( layer_connections );
                visible_layer->computeExpectation();
                recons += visible_layer->fpropNLL(input);

                // Update
                visible_layer->update(pos_visible, neg_visible);
                hidden_layer->update(pos_hidden, neg_hidden);
                layer_connections->update(pos_hidden, pos_visible,
                                          neg_hidden, neg_visible);
            }
            if(verbosity > 2)
                cout << "Reconstruction error = " << recons/train_set->length() << endl;
            recons = 0;
        }
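        // Copy the learned RBM parameters into the hidden-layer weight Var:
        // rows 1..inputsize_ of W receive the connection weights, and row 0
        // (the bias row of the affine transform) receives the hidden biases.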
        W->matValue.subMat(1,0,inputsize_,nhidden) << layer_matrix_connections->weights;
        W->matValue(0) << hidden_layer->bias;
    }

    if(model_type == "discriminative" || model_type == "discriminative_1_vs_all" || model_type == "generative_0-1" || model_type == "nnet_discriminative_1_vs_all")
    {
        // number of samples seen by optimizer before each optimizer update
        int nsamples = batch_size>0 ? batch_size : l;
        Func paramf = Func(invars, training_cost); // parameterized function to optimize
        Var totalcost = meanOf(train_set, paramf, nsamples);
        if(optimizer)
        {
            optimizer->setToOptimize(params, totalcost);
            optimizer->build();
        }
        else PLERROR("LinearInductiveTransferClassifier::train can't train without setting an optimizer first!");

        // number of optimizer stages corresponding to one learner stage (one epoch)
        int optstage_per_lstage = l/nsamples;

        PP<ProgressBar> pb;
        if(report_progress)
            pb = new ProgressBar("Training " + classname() + " from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);

        int initial_stage = stage;
        bool early_stop=false;
        //displayFunction(paramf, true, false, 250);
        while(stage<nstages && !early_stop)
        {
            optimizer->nstages = optstage_per_lstage;
            train_stats->forget();
            optimizer->early_stop = false;
            optimizer->optimizeN(*train_stats);
            // optimizer->verifyGradient(1e-4); // Uncomment if you want to check your new Var.
            train_stats->finalize();
            if(verbosity>2)
                cout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
            ++stage;
            if(pb)
                pb->update(stage-initial_stage);
        }
        if(verbosity>1)
            cout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;
    }
    else
    {
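        // Closed-form training of the generative model. A is the solution of
        // a (ridge-regularized) least-squares regression of the inputs onto
        // their class representations:
        //     A = (sum_n x_n r_n') * (sum_n r_n r_n' + weight_decay*I)^-1,
        // where r_n is the representation of example n's class. The diagonal
        // variance s is then the (weighted) average squared residual
        // ||x_n - A r_n||^2 per coordinate, plus sigma_min.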
        Mat ww(class_reps_var->width(),class_reps_var->width()); ww.fill(0);
        Mat ww_inv(class_reps_var->width(),class_reps_var->width());
        Mat xw(inputsize(),class_reps_var->width()); xw.fill(0);
        Vec input, target;
        real weight;
        input.resize(train_set->inputsize());
        target.resize(train_set->targetsize());
        for(int i=0; i<train_set->length(); i++)
        {
            train_set->getExample(i,input,target,weight);
            if(targetsize() == 1)
            {
                if(weightsize()>0)
                {
                    externalProductScaleAcc(ww,class_reps_var->matValue((int)target[0]),class_reps_var->matValue((int)target[0]),weight);
                    externalProductScaleAcc(xw,input,class_reps_var->matValue((int)target[0]),weight);
                }
                else
                {
                    externalProductAcc(ww,class_reps_var->matValue((int)target[0]),class_reps_var->matValue((int)target[0]));
                    externalProductAcc(xw,input,class_reps_var->matValue((int)target[0]));
                }
            }
            else
                for(int j=0; j<target.length(); j++)
                {
                    if(fast_exact_is_equal(target[j], 1)){
                        if(weightsize()>0)
                        {
                            externalProductScaleAcc(ww,class_reps_var->matValue(j),class_reps_var->matValue(j),weight);
                            externalProductScaleAcc(xw,input,class_reps_var->matValue(j),weight);
                        }
                        else
                        {
                            externalProductAcc(ww,class_reps_var->matValue(j),class_reps_var->matValue(j));
                            externalProductAcc(xw,input,class_reps_var->matValue(j));
                        }
                    }
                }
        }
        if(weight_decay > 0)
            for(int i=0; i<ww.length(); i++)
                ww(i,i) = ww(i,i) + weight_decay;
        matInvert(ww,ww_inv);
        A->value.fill(0);
        productAcc(A->matValue, xw, ww_inv);

        s->value.fill(0);
        Vec sample(s->size());
        Vec weights(inputsize());
        real sum = 0;
        for(int i=0; i<train_set->length(); i++)
        {
            train_set->getExample(i,input,target,weight);
            if(targetsize() == 1)
            {
                product(weights,A->matValue,class_reps_var->matValue((int)target[0]));
                if(weightsize()>0)
                {
                    diffSquareMultiplyAcc(s->value,weights,input,weight);
                    sum += weight;
                }
                else
                {
                    diffSquareMultiplyAcc(s->value,weights,input,real(1.0));
                    sum++;
                }
            }
            else
                for(int j=0; j<target.length(); j++)
                {
                    if(fast_exact_is_equal(target[j], 1))
                    {
                        product(weights,A->matValue,class_reps_var->matValue(j));
                        if(weightsize()>0)
                        {
                            diffSquareMultiplyAcc(s->value,weights,input,weight);
                            sum += weight;
                        }
                        else
                        {
                            diffSquareMultiplyAcc(s->value,weights,input,real(1.0));
                            sum++;
                        }
                    }
                }
        }
        s->value /= sum;
        s->value += sigma_min;

        if(verbosity > 2 && !multi_target_classifier)
        {
            Func paramf = Func(invars, training_cost);
            paramf->recomputeParents();
            real mean_cost = 0;
            Vec cost(2);
            Vec row(train_set->width());
            for(int i=0; i<train_set->length(); i++)
            {
                train_set->getRow(i,row);
                paramf->fprop(row.subVec(0,inputsize()+targetsize()),cost);
                mean_cost += cost[1];
            }
            mean_cost /= train_set->length();
            cout << "Train class error: " << mean_cost << endl;
        }
    }
    // Hugo: I don't know why we have to do this?!?
    output_and_target_to_cost->recomputeParents();
    test_costf->recomputeParents();
}

void LinearInductiveTransferClassifier::computeOutput(const Vec& input, Vec& output) const
{
    output.resize(outputsize());
    f->fprop(input,output);
}

void LinearInductiveTransferClassifier::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{
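    // In the multi-target case, the cost Func produces one value per target
    // (through the lift output), so the costs vector is temporarily enlarged
    // before the fprop. Afterwards, the cost of the first non-missing target
    // (offset by one for non-generative models, whose first cost is the
    // cross-entropy) is copied into the last reported slot and the vector is
    // shrunk back to its nominal size.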
    if(targetsize() != 1)
        costs.resize(costs.length()-1+targetsize());
    if(seen_targets.find(target[0])>=0)
        sup_output_and_target_to_cost->fprop(output&target, costs);
    else
        output_and_target_to_cost->fprop(output&target, costs);
    if(targetsize() != 1)
    {
        costs.resize(costs.length()+1);
        int i;
        for(i=0; i<target.length(); i++)
            if(!is_missing(target[i]))
                break;
        if(i>= target.length())
            PLERROR("In LinearInductiveTransferClassifier::computeCostsFromOutputs(): all targets are missing, can't compute cost");
        if(model_type == "generative")
            costs[costs.length()-1] = costs[i];
        else
            costs[costs.length()-1] = costs[i+1];
        costs[costs.length()-targetsize()-1] = costs[costs.length()-1];
        costs.resize(costs.length()-targetsize());
    }
}

void LinearInductiveTransferClassifier::computeOutputAndCosts(const Vec& inputv, const Vec& targetv,
                                 Vec& outputv, Vec& costsv) const
{
    if(targetsize() != 1)
        costsv.resize(costsv.length()-1+targetsize());

    outputv.resize(outputsize());
    if(seen_targets.find(targetv[0])>=0)
        sup_test_costf->fprop(inputv&targetv, outputv&costsv);
    else
        test_costf->fprop(inputv&targetv, outputv&costsv);

    if(targetsize() != 1)
    {
        costsv.resize(costsv.length()+1);
        int i;
        for(i=0; i<targetv.length(); i++)
            if(!is_missing(targetv[i]))
                break;
        if(i>= targetv.length())
            PLERROR("In LinearInductiveTransferClassifier::computeOutputAndCosts(): all targets are missing, can't compute cost");
        //for(int j=i+1; j<targetv.length(); j++)
        //    if(!is_missing(targetv[j]))
        //        PLERROR("In LinearInductiveTransferClassifier::computeCostsFromOutputs(): there should be only one non-missing target");
        //cout << "i=" << i << " ";
        if(model_type == "generative")
            costsv[costsv.length()-1] = costsv[i];
        else
            costsv[costsv.length()-1] = costsv[i+1];
        costsv[costsv.length()-targetsize()-1] = costsv[costsv.length()-1];
        costsv.resize(costsv.length()-targetsize());
    }
}

TVec<string> LinearInductiveTransferClassifier::getTestCostNames() const
{
    TVec<string> costs_str;
    if(model_type == "discriminative" || model_type == "discriminative_1_vs_all" || model_type == "generative_0-1" || model_type == "nnet_discriminative_1_vs_all")
    {
        if(model_type == "discriminative" || model_type == "generative_0-1")
        {
            costs_str.resize(2);
            costs_str[0] = "NLL";
            costs_str[1] = "class_error";
        }
        if(model_type == "discriminative_1_vs_all"
           || model_type == "nnet_discriminative_1_vs_all")
        {
            costs_str.resize(1);
            costs_str[0] = "cross_entropy";
            if(!multi_target_classifier)
            {
                costs_str.resize(2);
                costs_str[1] = "class_error";
            }
            else
            {
                costs_str.resize(2);
                costs_str[1] = "lift_first";
            }
        }
    }
    else if(model_type == "generative")
    {
        if(!multi_target_classifier)
        {
            costs_str.resize(1);
            costs_str[0] = "class_error";
        }
        else
        {
            costs_str.resize(1);
            costs_str[0] = "lift_first";
        }
    }
    return costs_str;
}

TVec<string> LinearInductiveTransferClassifier::getTrainCostNames() const
{
    return getTestCostNames();
}
void LinearInductiveTransferClassifier::buildTargetAndWeight() {
    //if(nhidden_schedule_current_position >= nhidden_schedule.length())
    if(targetsize() > 0)
    {
        target = Var(targetsize(), "target");
        if(weightsize_>0)
        {
            if (weightsize_!=1)
                PLERROR("In LinearInductiveTransferClassifier::buildTargetAndWeight - Expected weightsize to be 1 or 0 (or unspecified = -1, meaning 0), got %d",weightsize_);
            sampleweight = Var(1, "weight");
        }
    }
}

void LinearInductiveTransferClassifier::buildPenalties() {
    penalties.resize(0);  // prevents penalties from being added twice by consecutive builds
    if(weight_decay > 0)
    {
        if(model_type == "nnet_discriminative_1_vs_all")
        {
            //for(int i=0; i<Ws.length(); i++)
            //{
            //    penalties.append(affine_transform_weight_penalty(Ws[i], weight_decay, weight_decay, penalty_type));
            //}
            penalties.append(affine_transform_weight_penalty(W, weight_decay, 0, penalty_type));
        }

        penalties.append(affine_transform_weight_penalty(A, weight_decay, weight_decay, penalty_type));
    }
}

void LinearInductiveTransferClassifier::fillWeights(const Var& weights,
                                                    bool zero_first_row,
                                                    real scale_with_this) {
    if (initialization_method == "zero") {
        weights->value->clear();
        return;
    }
    real delta;
    if(scale_with_this < 0)
    {
        int is = weights.length();
        if (zero_first_row)
            is--; // -1 to get the same result as before.
        if (initialization_method.find("linear") != string::npos)
            delta = 1.0 / real(is);
        else
            delta = 1.0 / sqrt(real(is));
    }
    else
        delta = scale_with_this;

    if (initialization_method.find("normal") != string::npos)
        random_gen->fill_random_normal(weights->value, 0, delta);
    else
        random_gen->fill_random_uniform(weights->value, -delta, delta);
    if(zero_first_row)
        weights->matValue(0).clear();
}

void LinearInductiveTransferClassifier::buildFuncs(const Var& the_input, const Var& the_output, const Var& the_target, const Var& the_sampleweight){
    invars.resize(0);
    VarArray outvars;
    VarArray testinvars;
    if (the_input)
    {
        invars.push_back(the_input);
        testinvars.push_back(the_input);
    }
    if (the_output)
        outvars.push_back(the_output);
    if(the_target)
    {
        invars.push_back(the_target);
        testinvars.push_back(the_target);
        outvars.push_back(the_target);
    }
    if(the_sampleweight)
    {
        invars.push_back(the_sampleweight);
    }
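    // Compiled functions: f maps input -> output; test_costf maps
    // (input, target) -> (output, test costs); output_and_target_to_cost
    // maps (output, target) -> test costs. The sup_* variants use the
    // supervised costs (restricted to classes seen during training) instead
    // of the "new"/test costs.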
    f = Func(the_input, the_output);
    test_costf = Func(testinvars, the_output&test_costs);
    test_costf->recomputeParents();
    output_and_target_to_cost = Func(outvars, test_costs);
    output_and_target_to_cost->recomputeParents();

    VarArray sup_outvars;
    sup_test_costf = Func(testinvars, the_output&hconcat(costs));
    sup_test_costf->recomputeParents();
    sup_output_and_target_to_cost = Func(outvars, hconcat(costs));
    sup_output_and_target_to_cost->recomputeParents();
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :