// -*- C++ -*-

// TangentLearner.cc
//
// Copyright (C) 2004 Martin Monperrus & Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: TangentLearner.cc 6508 2006-12-15 02:35:49Z lamblin $
 ******************************************************* */

// Authors: Martin Monperrus & Yoshua Bengio

#include "TangentLearner.h"
#include <plearn/var/ProjectionErrorVariable.h>
//#include "LocalPCAVMatrix.h"
#include <plearn/vmat/LocalNeighborsDifferencesVMatrix.h>
#include <plearn/var/ProductVariable.h>
#include <plearn/var/PlusVariable.h>
#include <plearn/var/Var_operators.h>
#include <plearn/var/NoBpropVariable.h>
#include <plearn/vmat/ConcatColumnsVMatrix.h>
#include <plearn/math/random.h>
#include <plearn/var/SumOfVariable.h>
#include <plearn/var/TanhVariable.h>
#include <plearn/var/DiagonalizedFactorsProductVariable.h>
#include <plearn/math/plapack.h>
//#include "TMat_maths.h"
//#include "TVec_decl.h"

namespace PLearn {
using namespace std;

// The hidden-layer neurons correspond to hyperplanes.
// Smart initialization initializes these hyperplanes so that each one passes
// through points of the train_set drawn at random; this way we are sure the
// space of points is well covered.
// The constant c acts as a kind of inverse weight decay: the larger c is,
// the larger the weights, and the more sharply the neurons cut the space.
Mat smartInitialization(VMat v, int n, real c, real regularization)
{
    int l = v->length();
    int w = v->width();

    Mat result(n,w);
    Mat temp(w,w);
    Vec b(w);
    b << c;

    int i,j;

    for (i=0;i<n;++i)
    {
        temp.clear();
        for (j=0;j<w;++j)
        {
            v->getRow(uniform_multinomial_sample(l),temp(j));
        }
        // Regularization avoids trouble 1/ when the same index has been drawn
        // twice, 2/ when the points are too close to each other.
        regularizeMatrix(temp,regularization);
        result(i) << solveLinearSystem(temp, b);
    }
    return result;
}

TangentLearner::TangentLearner()
/* ### Initialize all fields to their default value here */
    : training_targets("local_neighbors"), use_subspace_distance(false), normalize_by_neighbor_distance(true),
      ordered_vectors(false), smart_initialization(0), initialization_regularization(1e-3),
      n_neighbors(5), n_dim(1), architecture_type("single_neural_network"), output_type("tangent_plane"),
      n_hidden_units(-1), batch_size(1), norm_penalization(0), svd_threshold(1e-5),
      projection_error_regularization(0), V_slack(0)
{
}

PLEARN_IMPLEMENT_OBJECT(TangentLearner, "Learns the local tangent plane of the manifold near which the data lie.",
                        "This learner models a manifold near which the data are supposed to lie.\n"
                        "The manifold is represented by a function which predicts a basis for the\n"
                        "tangent planes at each point x, given x in R^n. Let f_i(x) be the predicted i-th tangent\n"
                        "vector (in R^n). Then we will optimize the parameters that define the d functions f_i by\n"
                        "pushing the f_i so that they span the local tangent directions. Three criteria are\n"
                        "possible, according to the 'training_targets', 'normalize_by_neighbor_distance' and\n"
                        "'use_subspace_distance' options. The default criterion is the recommended one, with\n"
                        " training_targets='local_neighbors', normalize_by_neighbor_distance=1,\n"
                        "and use_subspace_distance=0 (it really did not work well in our experiments with\n"
                        "use_subspace_distance=1). This corresponds to the following cost function:\n"
                        "   sum_x sum_j min_w ||t(x,j) - sum_i w_i f_i(x)||^2 / ||t(x,j)||^2\n"
                        "where x is an example, t(x,j) is the difference vector between x and its j-th neighbor,\n"
                        "and the w_i are chosen freely for each j and x and correspond to the weights given to\n"
                        "each basis vector f_i(x) to obtain the projection of t(x,j) on the tangent plane.\n"
                        "More generally, if use_subspace_distance,\n"
                        "   criterion = min_{w,u} || sum_i w_i f_i - sum_j u_j t(x,j) ||^2\n"
                        "   under the constraint that ||w||=1.\n"
                        "else\n"
                        "   criterion = sum_x sum_j min_w ||t(x,j) - sum_i w_i f_i(x)||^2 / ||t(x,j)||^2\n"
                        "where the first sum is over training examples and w is a free d-vector,\n"
                        "t(x,j) estimates local tangent directions based on near neighbors, and the denominator\n"
                        "||t(x,j)||^2 is optional (normalize_by_neighbor_distance). t(x,j)\n"
                        "is defined according to the training_targets option:\n"
                        " 'local_evectors' : local principal components (based on n_neighbors of x)\n"
                        " 'local_neighbors': difference between x and its n_neighbors.\n"
                        "An additional criterion option that applies only to use_subspace_distance=0 is\n"
                        "the ordered_vectors option, which applies a separate cost to each of the f_i:\n"
                        "the f_1 vector tries to make the projection of t(x,j) on f_1 close to t(x,j), while\n"
                        "the f_2 vector tries to make the projection of t(x,j) on the (f_1,f_2) basis close to t(x,j),\n"
                        "etc., i.e. the gradient on f_i is computed based on a cost that involves only\n"
                        "the projection on the first i vectors. This is analogous to principal component analysis:\n"
                        "the first vector tries to capture as much as possible of the variance, the second as much\n"
                        "as possible of the remaining variance, etc.\n"
                        "Different architectures are possible for the f_i(x) (architecture_type option):\n"
                        "   - multi_neural_network: one neural net per basis function\n"
                        "   - single_neural_network: single neural network with matrix output (one row per basis vector)\n"
                        "   - linear: F_{ij}(x) = sum_k A_{ijk} x_k\n"
                        "   - embedding_neural_network: the embedding function e_k(x) (for the k-th dimension)\n"
                        "     is an ordinary neural network, and F_{ki}(x) = d(e_k(x))/d(x_i). This makes it possible\n"
                        "     to output the embedding, instead of, or as well as, the tangent plane (output_type option).\n"
                        "   - embedding_quadratic: the embedding function e_k(x) (for the k-th dimension)\n"
                        "     is a 2nd order polynomial of x, and F_{ki}(x) = d(e_k(x))/d(x_i). This makes it possible\n"
                        "     to output the embedding, instead of, or as well as, the tangent plane (output_type option).\n"
    );
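// Illustrative sketch, not called by the learner: the inner minimization of
// the default criterion above, specialized to a single predicted tangent
// vector (n_dim = 1). In that case min_w ||t - w*f||^2 is attained at
// w = (f.t)/(f.f), so the normalized residual reduces algebraically to
// (||t||^2 - (f.t)^2/||f||^2) / ||t||^2. Here f stands for f_1(x) and t for
// one neighbor difference t(x,j); dividing by ||t||^2 corresponds to
// normalize_by_neighbor_distance=1. The general n_dim > 1 case is handled by
// ProjectionErrorVariable (see the svd_threshold option below).
static real one_tangent_projection_error_sketch(const Vec& f, const Vec& t)
{
    real ft = dot(f, t);  // f . t
    real ff = dot(f, f);  // ||f||^2
    real tt = dot(t, t);  // ||t||^2
    return (tt - ft*ft/ff) / tt;
}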
void TangentLearner::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave


    declareOption(ol, "training_targets", &TangentLearner::training_targets, OptionBase::buildoption,
                  "Specifies a strategy for training the tangent plane predictor. Possible values are the strings\n"
                  "   local_evectors  : local principal components (based on n_neighbors of x)\n"
                  "   local_neighbors : difference between x and its n_neighbors.\n"
        );
    declareOption(ol, "smart_initialization", &TangentLearner::smart_initialization, OptionBase::buildoption,
                  "Whether to use smart initialization (hyperplanes through random training points);\n"
                  "the value is also used as the constant c of smartInitialization.\n");

    declareOption(ol, "initialization_regularization", &TangentLearner::initialization_regularization, OptionBase::buildoption,
                  "Regularization added to the linear systems solved during smart initialization.\n");

    declareOption(ol, "use_subspace_distance", &TangentLearner::use_subspace_distance, OptionBase::buildoption,
                  "Minimize the distance between the subspace spanned by the f_i and by the (x-neighbor) differences,\n"
                  "instead of between the individual targets t_j and the subspace spanned by the f_i.\n");

    declareOption(ol, "normalize_by_neighbor_distance", &TangentLearner::normalize_by_neighbor_distance,
                  OptionBase::buildoption, "Whether to normalize the cost by the distance to the neighbor.\n");

    declareOption(ol, "ordered_vectors", &TangentLearner::ordered_vectors,
                  OptionBase::buildoption, "Whether to apply a differential cost to each f_i so as to\n"
                  "obtain an ordering similar to the one obtained with principal component analysis.\n");

    declareOption(ol, "n_neighbors", &TangentLearner::n_neighbors, OptionBase::buildoption,
                  "Number of nearest neighbors to consider.\n"
        );

    declareOption(ol, "n_dim", &TangentLearner::n_dim, OptionBase::buildoption,
                  "Number of tangent vectors to predict.\n"
        );

    declareOption(ol, "optimizer", &TangentLearner::optimizer, OptionBase::buildoption,
                  "Optimizer that optimizes the cost function.\n"
        );

    //declareOption(ol, "tangent_predictor", &TangentLearner::tangent_predictor, OptionBase::buildoption,
    //              "Func that specifies the parametrized mapping from inputs to predicted tangent planes\n"
    //    );
    declareOption(ol, "architecture_type", &TangentLearner::architecture_type, OptionBase::buildoption,
                  "For pre-defined tangent_predictor types: \n"
                  "   multi_neural_network : prediction[j] = b[j] + W[j]*tanh(c[j] + V[j]*x), where W[j] has n_hidden_units columns\n"
                  "                          and there is a separate set of parameters for each of the n_dim tangent vectors to predict;\n"
                  "   single_neural_network : prediction = b + W*tanh(c + V*x), where W has n_hidden_units columns\n"
                  "                          and the resulting vector is viewed as an n_dim by n matrix;\n"
                  "   linear : prediction = b + W*x;\n"
                  "   embedding_neural_network: prediction[k,i] = d(e[k])/d(x[i]), where e(x) is an ordinary neural\n"
                  "                          network representing the embedding function (see output_type option);\n"
                  "   slack_embedding_neural_network: like embedding_neural_network, but the V outside the tanh is\n"
                  "                          replaced by a call to no_bprop(V,V_slack), i.e. its gradient can be\n"
                  "                          reduced (0<V_slack<1) or eliminated (V_slack=1);\n"
                  "   embedding_quadratic: prediction[k,i] = d(e_k)/d(x_i) = A_k x + b_k, where e_k(x) is a quadratic\n"
                  "                          form in x, i.e. e_k = x' A_k x + b_k' x;\n"
                  "   (empty string): specify explicitly the function with the tangent_predictor option;\n"
                  "where (b,W,c,V) are parameters to be optimized.\n"
        );

    declareOption(ol, "V_slack", &TangentLearner::V_slack, OptionBase::buildoption,
                  "Coefficient that multiplies the gradient on the outside V when architecture_type=='slack_embedding_neural_network'.\n"
        );

    declareOption(ol, "n_hidden_units", &TangentLearner::n_hidden_units, OptionBase::buildoption,
                  "Number of hidden units (if architecture_type is some kind of neural network).\n"
        );

    declareOption(ol, "output_type", &TangentLearner::output_type, OptionBase::buildoption,
                  "Default value (the only one considered if architecture_type != embedding_*) is\n"
                  "   tangent_plane: output the predicted tangent plane.\n"
                  "   embedding: output the embedding vector (only if architecture_type == embedding_*).\n"
                  "   tangent_plane+embedding: output both (in this order).\n"
        );


    declareOption(ol, "batch_size", &TangentLearner::batch_size, OptionBase::buildoption,
                  "How many samples to use to estimate the average gradient before updating the weights;\n"
                  "0 is equivalent to specifying training_set->length().\n");

    declareOption(ol, "norm_penalization", &TangentLearner::norm_penalization, OptionBase::buildoption,
                  "Factor that multiplies an extra penalization of the norm of f_i so that ||f_i|| be close to 1.\n"
                  "The penalty is norm_penalization*sum_i (1 - ||f_i||^2)^2.\n"
        );

    declareOption(ol, "svd_threshold", &TangentLearner::svd_threshold, OptionBase::buildoption,
                  "Threshold to accept singular values of F in solving for linear combination weights on tangent subspace.\n"
        );

    declareOption(ol, "projection_error_regularization", &TangentLearner::projection_error_regularization, OptionBase::buildoption,
                  "Term added to the linear system matrix involved in fitting subspaces in the projection error computation.\n"
        );

    declareOption(ol, "parameters", &TangentLearner::parameters, OptionBase::learntoption,
                  "Parameters of the tangent_predictor function.\n"
        );

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
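// Illustrative sketch, not called by the learner: the 'single_neural_network'
// prediction b + W*tanh(c + V*x) written with explicit loops, to make the
// shapes described in the architecture_type option concrete. The length
// n_dim*n output is read as an n_dim-by-n matrix whose k-th row is the k-th
// predicted tangent vector f_k(x). Here b is taken as a flat vector of length
// n_dim*n; this is a shape sketch, not the Var graph built in build_().
static Mat single_net_tangent_plane_sketch(const Vec& x, const Mat& V, const Vec& c,
                                           const Mat& W, const Vec& b, int n_dim)
{
    int n_hidden = V.length();   // V is (n_hidden_units x n)
    int n = x.length();
    Vec a(n_hidden);             // a = tanh(c + V*x)
    for (int h = 0; h < n_hidden; ++h)
    {
        real s = c[h];
        for (int i = 0; i < n; ++i)
            s += V(h,i) * x[i];
        a[h] = tanh(s);
    }
    Mat F(n_dim, n);             // row k = f_k(x)
    for (int k = 0; k < n_dim; ++k)
        for (int i = 0; i < n; ++i)
        {
            real s = b[k*n + i];
            for (int h = 0; h < n_hidden; ++h)
                s += W(k*n + i, h) * a[h];
            F(k,i) = s;
        }
    return F;
}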
void TangentLearner::build_()
{

    int n = PLearner::inputsize_;

    if (n>0)
    {
        if (architecture_type == "multi_neural_network")
        {
            if (n_hidden_units <= 0)
                PLERROR("TangentLearner::Number of hidden units should be positive, now %d\n",n_hidden_units);
        }
        if (architecture_type == "single_neural_network")
        {
            if (n_hidden_units <= 0)
                PLERROR("TangentLearner::Number of hidden units should be positive, now %d\n",n_hidden_units);
            Var x(n);
            b = Var(n_dim*n,1,"b");
            W = Var(n_dim*n,n_hidden_units,"W");
            c = Var(n_hidden_units,1,"c");
            V = Var(n_hidden_units,n,"V");
            tangent_predictor = Func(x, b & W & c & V, b + product(W,tanh(c + product(V,x))));
            output_f = tangent_predictor;
        }
        else if (architecture_type == "linear")
        {
            Var x(n);
            b = Var(n_dim*n,1,"b");
            W = Var(n_dim*n,n,"W");
            tangent_predictor = Func(x, b & W, b + product(W,x));
            output_f = tangent_predictor;
        }
        else if (architecture_type == "embedding_neural_network")
        {
            if (n_hidden_units <= 0)
                PLERROR("TangentLearner::Number of hidden units should be positive, now %d\n",n_hidden_units);
            Var x(n);
            W = Var(n_dim,n_hidden_units,"W");
            c = Var(n_hidden_units,1,"c");
            V = Var(n_hidden_units,n,"V");
            b = Var(n_dim,n,"b");
            Var a = tanh(c + product(V,x));
            Var tangent_plane = diagonalized_factors_product(W,1-a*a,V);
            tangent_predictor = Func(x, W & c & V, tangent_plane);
            embedding = product(W,a);
            if (output_type=="tangent_plane")
                output_f = tangent_predictor;
            else if (output_type=="embedding")
                output_f = Func(x, embedding);
            else if (output_type=="tangent_plane+embedding")
                output_f = Func(x, tangent_plane & embedding);
        }
        else if (architecture_type == "slack_embedding_neural_network")
        {
            if (n_hidden_units <= 0)
                PLERROR("TangentLearner::Number of hidden units should be positive, now %d\n",n_hidden_units);
            Var x(n);
            W = Var(n_dim,n_hidden_units,"W");
            c = Var(n_hidden_units,1,"c");
            V = Var(n_hidden_units,n,"V");
            b = Var(n_dim,n,"b");
            Var a = tanh(c + product(V,x));
            Var tangent_plane = diagonalized_factors_product(W,1-a*a,no_bprop(V,V_slack));
            tangent_predictor = Func(x, W & c & V, tangent_plane);
            embedding = product(W,a);
            if (output_type=="tangent_plane")
                output_f = tangent_predictor;
            else if (output_type=="embedding")
                output_f = Func(x, embedding);
            else if (output_type=="tangent_plane+embedding")
                output_f = Func(x, tangent_plane & embedding);
        }
        else if (architecture_type == "embedding_quadratic")
        {
            Var x(n);
            b = Var(n_dim,n,"b");
            W = Var(n_dim*n,n,"W");
            Var Wx = product(W,x);
            Var tangent_plane = Wx + b;
            tangent_predictor = Func(x, W & b, tangent_plane);
            embedding = product(new PlusVariable(b,Wx),x);
            if (output_type=="tangent_plane")
                output_f = tangent_predictor;
            else if (output_type=="embedding")
                output_f = Func(x, embedding);
            else if (output_type=="tangent_plane+embedding")
                output_f = Func(x, tangent_plane & embedding);
        }
        else if (architecture_type != "")
            PLERROR("TangentLearner::build, unknown architecture_type option %s (should be 'multi_neural_network',\n"
                    "'single_neural_network', 'linear', 'embedding_neural_network', 'slack_embedding_neural_network',\n"
                    "'embedding_quadratic', or the empty string '')\n",
                    architecture_type.c_str());

        if (parameters.size()>0 && parameters.nelems() == tangent_predictor->parameters.nelems())
            tangent_predictor->parameters.copyValuesFrom(parameters);
        parameters.resize(tangent_predictor->parameters.size());
        for (int i=0;i<parameters.size();i++)
            parameters[i] = tangent_predictor->parameters[i];

        if (training_targets=="local_evectors")
            tangent_targets = Var(n_dim,n);
        else if (training_targets=="local_neighbors")
            tangent_targets = Var(n_neighbors,n);
        else PLERROR("TangentLearner::build, option training_targets is %s, should be 'local_evectors' or 'local_neighbors'.",
                     training_targets.c_str());

        Var proj_err = projection_error(tangent_predictor->outputs[0], tangent_targets, norm_penalization, n,
                                        normalize_by_neighbor_distance, use_subspace_distance, svd_threshold,
                                        projection_error_regularization, ordered_vectors);
        projection_error_f = Func(tangent_predictor->outputs[0] & tangent_targets, proj_err);
        cost_of_one_example = Func(tangent_predictor->inputs & tangent_targets, tangent_predictor->parameters, proj_err);

    }
}
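// Illustrative sketch, not called by the learner: what the
// diagonalized_factors_product Var used above computes for the embedding
// networks. With embedding e(x) = W*a and a = tanh(c + V*x), the chain rule
// gives the Jacobian
//    F(k,i) = d(e_k)/d(x_i) = sum_h W(k,h) * (1 - a[h]^2) * V(h,i),
// i.e. F = W * diag(1 - a^2) * V, which is exactly the tangent_plane built
// in build_(). Plain loops over PLearn Mat/Vec; a is assumed to already hold
// tanh(c + V*x).
static Mat embedding_jacobian_sketch(const Mat& W, const Vec& a, const Mat& V)
{
    int n_dim = W.length();      // W is (n_dim x n_hidden_units)
    int n_hidden = V.length();   // V is (n_hidden_units x n)
    int n = V.width();
    Mat F(n_dim, n);
    for (int k = 0; k < n_dim; ++k)
        for (int i = 0; i < n; ++i)
        {
            real s = 0;
            for (int h = 0; h < n_hidden; ++h)
                s += W(k,h) * (1 - a[h]*a[h]) * V(h,i);
            F(k,i) = s;
        }
    return F;
}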
// ### Nothing to add here, simply calls build_
void TangentLearner::build()
{
    inherited::build();
    build_();
}

extern void varDeepCopyField(Var& field, CopiesMap& copies);

void TangentLearner::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(cost_of_one_example, copies);
    varDeepCopyField(b, copies);
    varDeepCopyField(W, copies);
    varDeepCopyField(c, copies);
    varDeepCopyField(V, copies);
    varDeepCopyField(tangent_targets, copies);
    deepCopyField(parameters, copies);
    deepCopyField(optimizer, copies);
    deepCopyField(tangent_predictor, copies);
}


int TangentLearner::outputsize() const
{
    return output_f->outputsize;
}

void TangentLearner::forget()
{
    if (train_set) initializeParams();
    stage = 0;
}

void TangentLearner::train()
{

    VMat train_set_with_targets;
    VMat targets_vmat;
    if (!cost_of_one_example)
        PLERROR("TangentLearner::train: build has not been run after setTrainingSet!");

    if (training_targets == "local_evectors")
    {
        //targets_vmat = new LocalPCAVMatrix(train_set, n_neighbors, n_dim);
        PLERROR("local_evectors not yet implemented");
    }
    else if (training_targets == "local_neighbors")
    {

        targets_vmat = local_neighbors_differences(train_set, n_neighbors);
        //cout << targets_vmat;
    }
    else PLERROR("TangentLearner::train, unknown training_targets option %s (should be 'local_evectors' or 'local_neighbors')\n",
                 training_targets.c_str());

    train_set_with_targets = hconcat(train_set, targets_vmat);
    train_set_with_targets->defineSizes(inputsize(),inputsize()*n_neighbors,0);
    int l = train_set->length();
    int nsamples = batch_size>0 ? batch_size : l;
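    // Worked example of the two lines above and of optstage_per_lstage below:
    // with l = 1000 training examples and batch_size = 20, the cost Var
    // averages cost_of_one_example over nsamples = 20 examples per optimizer
    // update, and one learner stage (epoch) corresponds to
    // optstage_per_lstage = 1000/20 = 50 optimizer updates.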
    Var totalcost = meanOf(train_set_with_targets, cost_of_one_example, nsamples);
    if(optimizer)
    {
        optimizer->setToOptimize(parameters, totalcost);
        optimizer->build();
    }
    else PLERROR("TangentLearner::train can't train without setting an optimizer first!");

    // Number of optimizer stages corresponding to one learner stage (one epoch).
    int optstage_per_lstage = l/nsamples;

    PP<ProgressBar> pb;
    if(report_progress>0)
        pb = new ProgressBar("Training TangentLearner from stage " + tostring(stage) + " to " + tostring(nstages), nstages-stage);

    int initial_stage = stage;
    bool early_stop=false;
    while(stage<nstages && !early_stop)
    {
        optimizer->nstages = optstage_per_lstage;
        train_stats->forget();
        optimizer->early_stop = false;
        optimizer->optimizeN(*train_stats);
        train_stats->finalize();
        if(verbosity>2)
            cout << "Epoch " << stage << " train objective: " << train_stats->getMean() << endl;
        ++stage;
        if(pb)
            pb->update(stage-initial_stage);
    }
    if(verbosity>1)
        cout << "EPOCH " << stage << " train objective: " << train_stats->getMean() << endl;
}

void TangentLearner::initializeParams()
{
    if (seed_>=0)
        manual_seed(seed_);
    else
        PLearn::seed();

    if (architecture_type=="single_neural_network")
    {
        if (smart_initialization)
        {
            V->matValue << smartInitialization(train_set,n_hidden_units,smart_initialization,initialization_regularization);
            W->value << (1/real(n_hidden_units));
            b->matValue.clear();
            c->matValue.clear();
        }
        else
        {
            real delta = 1.0 / sqrt(real(inputsize()));
            fill_random_uniform(V->value, -delta, delta);
            delta = 1.0 / real(n_hidden_units);
            fill_random_uniform(W->matValue, -delta, delta);
            c->matValue.clear();
            //fill_random_uniform(c->matValue,-3,3);
            //b->matValue.clear();
        }
    }
    else if (architecture_type=="linear")
    {
        real delta = 1.0 / sqrt(real(inputsize()));
        b->matValue.clear();
        fill_random_uniform(W->matValue, -delta, delta);
    }
    else if (architecture_type=="embedding_neural_network")
    {
        real delta = 1.0 / sqrt(real(inputsize()));
        fill_random_uniform(V->value, -delta, delta);
        delta = 1.0 / real(n_hidden_units);
        fill_random_uniform(W->matValue, -delta, delta);
        c->value.clear();
        b->value.clear();
    }
    else if (architecture_type=="embedding_quadratic")
    {
        real delta = 1.0 / sqrt(real(inputsize()));
        fill_random_uniform(W->matValue, -delta, delta);
        b->value.clear();
    }
    else PLERROR("TangentLearner::initializeParams: architecture_type '%s' not handled yet!", architecture_type.c_str());
    // Reset optimizer
    if(optimizer)
        optimizer->reset();
}


void TangentLearner::computeOutput(const Vec& input, Vec& output) const
{
    int nout = outputsize();
    output.resize(nout);
    output << output_f(input);
}

void TangentLearner::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                             const Vec& target, Vec& costs) const
{
    PLERROR("TangentLearner::computeCostsFromOutputs not defined for this learner");
}

TVec<string> TangentLearner::getTestCostNames() const
{
    return getTrainCostNames();
}

TVec<string> TangentLearner::getTrainCostNames() const
{
    TVec<string> cost(1);
    cost[0] = "projection_error";
    return cost;
}
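// Illustrative usage sketch (hypothetical, kept as a comment): driving this
// learner from C++. 'data' stands for any VMat of inputs and 'some_optimizer'
// for an already configured PP<Optimizer>; both are assumptions of this
// example, not names defined in this file.
//
//     PP<TangentLearner> learner = new TangentLearner();
//     learner->n_dim = 2;                 // predict 2 tangent vectors per point
//     learner->n_neighbors = 10;          // targets from 10 nearest neighbors
//     learner->n_hidden_units = 20;
//     learner->optimizer = some_optimizer;
//     learner->setTrainingSet(data);
//     learner->build();                   // required after setTrainingSet (see train())
//     learner->nstages = 50;              // train for 50 epochs
//     learner->train();
//     Vec input_row(learner->inputsize());
//     data->getRow(0, input_row);
//     Vec tangent_basis(learner->outputsize());
//     learner->computeOutput(input_row, tangent_basis);  // n_dim x n, flattened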
} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :