
// -*- C++ -*-

// EntropyContrast.cc
//
// Copyright (C) 2004  Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: EntropyContrast.cc 3994 2005-08-25 13:35:03Z chapados $
 ******************************************************* */

#include "EntropyContrast.h"
#include <plearn/vmat/VMat_basic_stats.h>
//#include "TMat_maths.h"
#include <plearn/math/plapack.h>
#include <plearn/math/random.h>

namespace PLearn {
using namespace std;

EntropyContrast::EntropyContrast()
    : nconstraints(4) //TODO: change to input_size
{
    learning_rate = 0.001;
    decay_factor = 0;
    weight_real = weight_gen = weight_extra = 1;
    nconstraints = 0;
    n = 0;
    evaluate_every_n_epochs = 1;
    evaluate_first_epoch = true;
    evaluation_method = "no_evaluation";
    nhidden = 0;
    alpha = 0.0;
}

PLEARN_IMPLEMENT_OBJECT(EntropyContrast,
                        "Performs an EntropyContrast search",
                        "Learns a set of constraint functions (features) by contrasting their "
                        "behavior on real training data with their behavior on generated noise data.");

void EntropyContrast::declareOptions(OptionList& ol)
{
    declareOption(ol, "nconstraints", &EntropyContrast::nconstraints, OptionBase::buildoption,
                  "The number of constraints to create (this is also the outputsize)");
    declareOption(ol, "learning_rate", &EntropyContrast::learning_rate, OptionBase::buildoption,
                  "The learning rate of the algorithm");
    declareOption(ol, "decay_factor", &EntropyContrast::decay_factor, OptionBase::buildoption,
                  "The decay factor of the learning rate");

    declareOption(ol, "weight_decay_hidden", &EntropyContrast::weight_decay_hidden, OptionBase::buildoption,
                  "The weight decay factor for the hidden-layer weights");
    declareOption(ol, "weight_decay_output", &EntropyContrast::weight_decay_output, OptionBase::buildoption,
                  "The weight decay factor for the output-layer weights");

    declareOption(ol, "cost_real", &EntropyContrast::cost_real, OptionBase::buildoption,
                  "The method used to compute the cost on the real data");
    declareOption(ol, "cost_gen", &EntropyContrast::cost_gen, OptionBase::buildoption,
                  "The method used to compute the cost on the generated data");
    declareOption(ol, "cost_extra", &EntropyContrast::cost_extra, OptionBase::buildoption,
                  "The method used to compute the extra cost");
    declareOption(ol, "gen_method", &EntropyContrast::gen_method, OptionBase::buildoption,
                  "The method used to generate new points");
    declareOption(ol, "weight_real", &EntropyContrast::weight_real, OptionBase::buildoption,
                  "The relative weight of the cost on the real data (default 1)");
    declareOption(ol, "weight_gen", &EntropyContrast::weight_gen, OptionBase::buildoption,
                  "The relative weight of the cost on the generated data (default 1)");
    declareOption(ol, "weight_extra", &EntropyContrast::weight_extra, OptionBase::buildoption,
                  "The relative weight of the extra cost (default 1)");
    declareOption(ol, "evaluation_method", &EntropyContrast::evaluation_method, OptionBase::buildoption,
                  "The method used to evaluate the constraint learning");
    declareOption(ol, "evaluate_every_n_epochs", &EntropyContrast::evaluate_every_n_epochs, OptionBase::buildoption,
                  "Number of epochs after which the constraint evaluation is done");
    declareOption(ol, "test_set", &EntropyContrast::test_set, OptionBase::buildoption,
                  "VMat test set");
    declareOption(ol, "nhidden", &EntropyContrast::nhidden, OptionBase::buildoption,
                  "The number of hidden units");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
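
// A hypothetical usage sketch (not part of the original source): the build
// options above could be set from a PLearn script roughly along these lines.
// Option names match declareOptions(); the values are purely illustrative.
//
//   EntropyContrast(
//       nconstraints = 2;
//       nhidden = 10;
//       learning_rate = 0.001;
//       decay_factor = 0;
//       cost_real = "constraint_variance";
//       cost_gen = "constraint_variance";
//       cost_extra = "derivative";
//       gen_method = "N(0,1)";
//   )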

// Functions for the continuous case

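//! Randomly initialize the parameters of the continuous-case network:
//! weights and biases uniform in [-10, 10], running means of the
//! constraints reset to 0 and running variances to 1.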
void EntropyContrast::initialize_NNcontinuous()
{
    fill_random_uniform(w, -10.0, 10.0);
    fill_random_uniform(v, -10.0, 10.0);

    fill_random_uniform(bias_hidden, -10.0, 10.0);
    fill_random_uniform(bias_output, -10.0, 10.0);

    mu_f.fill(0.0);
    sigma_f.fill(1.0);

    mu_f_hat.fill(0.0);
    sigma_f_hat.fill(1.0);

    // statistics used by the extra diversity constraint
    mu_g.fill(0.0);
    sigma_g.fill(1.0);

    mu_f_square.fill(0.0);
    sigma_f_square.fill(1.0);

    full = 1.0;
}

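//! Forward pass to the hidden layer:
//!   hidden_units = tanh(v * input_units + bias_hidden)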
void EntropyContrast::computeNNcontinuous_hidden(const Vec& input_units, Vec& hidden_units)
{
    for (int i = 0; i < nhidden; ++i)
    {
        hidden_units[i] = bias_hidden[i];
        for (int j = 0; j < n; ++j)
            hidden_units[i] += v(i,j) * input_units[j];
    }
    compute_tanh(hidden_units, hidden_units);
}


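//! Forward pass from the hidden layer to the constraints (linear output):
//!   output_units = w * hidden_units + bias_output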
void EntropyContrast::computeNNcontinuous_constraints(Vec& hidden_units, Vec& output_units)
{
    for (int i = 0; i < nconstraints; ++i)
    {
        output_units[i] = bias_output[i];
        for (int j = 0; j < nhidden; ++j)
            output_units[i] += w(i,j) * hidden_units[j];
    }
}


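//! Full forward pass: fills hidden_units and output_units (the constraints
//! f(x)) for the given input.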
void EntropyContrast::get_NNcontinuous_output(const Vec& input_units, Vec& output_units, Vec& hidden_units)
{
    computeNNcontinuous_hidden(input_units, hidden_units);        // compute the hidden units
    computeNNcontinuous_constraints(hidden_units, output_units);  // compute the output units (the constraints)
}

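//! Fill 'output' with i.i.d. samples from N(0,1).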
void EntropyContrast::gen_normal_0_1(Vec& output)
{
    for (int i = 0; i < output.length(); ++i) {
        output[i] = gaussian_01();
    }
}


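//! Update a running mean and variance of the constraints with an
//! exponential moving average (the coefficient alpha is set by
//! update_alpha()):
//!   mu    <- alpha * mu    + (1 - alpha) * f_x
//!   sigma <- alpha * sigma + (1 - alpha) * (f_x - mu)^2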
void EntropyContrast::update_mu_sigma_f(const Vec& f_x, Vec& mu, Vec& sigma)
{
    // update the running mean
    mu = mu * alpha + f_x * (1-alpha);

    // update the running variance
    sigma = alpha * sigma + (1-alpha) * square(f_x - mu);
}

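//! Set the moving-average coefficient: during the first stage alpha ramps
//! up as 1 - 1/(t+2), so that early samples weigh heavily while the running
//! statistics warm up; afterwards it is held fixed at 1 - 1/inputsize.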
void EntropyContrast::update_alpha(int stage, int current_input_index)
{
    if (stage == 0)
        alpha = 1.0 - 1.0 / (current_input_index + 2);
    else
        alpha = 1.0 - 1.0 / inputsize; // inputsize holds the number of training examples (see build_())
}

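//! Extra "diversity" cost (cost_extra == "variance_sum_square_constraints"):
//! cost[i] is the mean of f_x[j]^2 over j <= i, and the gradient pushes each
//! constraint's squared output to be decorrelated from the centered squared
//! outputs of the constraints before it.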
void EntropyContrast::compute_diversity_cost(const Vec& f_x, const Vec& cost, Vec& grad_C_extra_cost_wrt_f_x)
{
    cost.fill(0.0);
    for (int i = 0; i < nconstraints; ++i)
    {
        for (int j = 0; j <= i; ++j)
            cost[i] += pow(f_x[j], 2);

        cost[i] /= i + 1;
    }
    Vec full_sum(nconstraints);
    full_sum[0] = pow(f_x[0],2) - (sigma_f[0] + pow(mu_f[0],2));
    grad_C_extra_cost_wrt_f_x[0] = 0.0; // the first constraint has no predecessors to be decorrelated from
    for (int i = 1; i < nconstraints; ++i)
    {
        full_sum[i] = full_sum[i-1] + (pow(f_x[i],2) - (sigma_f[i] + pow(mu_f[i],2)));
        grad_C_extra_cost_wrt_f_x[i] = full_sum[i-1] * f_x[i] / train_set.length();
    }
}



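//! Compute the Jacobian of the constraints with respect to the input, for
//! the one-hidden-layer tanh network:
//!   df/dx = w * diag(1 - tanh^2(v * x + bias_hidden)) * v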
void EntropyContrast::compute_df_dx(Mat& df_dx, const Vec& input)
{
    Vec ones(nhidden);
    ones.fill(1);
    Vec hidden(nhidden);
    hidden = product(v, input);
    hidden = hidden + bias_hidden;
    Vec diag(nhidden);
    diag = ones - square(tanh(hidden));
    diagonalizedFactorsProduct(df_dx, w, diag, v);
}

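//! Gradient of the log-variance cost with respect to f: since sigma is
//! estimated from (f - mu)^2, d log(sigma) / d f is proportional to
//!   2 * (f - mu) / sigma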
void EntropyContrast::get_grad_log_variance_wrt_f(Vec& grad, const Vec& f_x, const Vec& mu, const Vec& sigma)
{
    for (int i = 0; i < f_x.length(); ++i) {
        grad[i] = 2 * (f_x[i] - mu[i]) / sigma[i];
    }
}

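//! Backpropagate grad_C_real_wrt_f_x (the gradient of a cost with respect
//! to the constraints) through the network, filling the gradients with
//! respect to w, v and both bias vectors.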
void EntropyContrast::set_NNcontinuous_gradient(Vec& grad_C_real_wrt_f_x, Mat& grad_H_f_x_wrt_w, Mat& grad_H_f_x_wrt_v,
                                                Vec& hidden_units, Vec& input_units, Vec& grad_H_f_x_wrt_bias_hidden, Vec& grad_H_f_x_wrt_bias_output)
{
    // set the gradient grad_H_f_x_wrt_w
    for (int i = 0; i < nconstraints; ++i)
        for (int j = 0; j < nhidden; ++j)
        {
            grad_H_f_x_wrt_w(i,j) = grad_C_real_wrt_f_x[i] * hidden_units[j];
        }

    // set the gradient grad_H_f_x_wrt_bias_output
    for (int i = 0; i < nconstraints; ++i)
        grad_H_f_x_wrt_bias_output[i] = grad_C_real_wrt_f_x[i];

    // set the gradient grad_H_f_x_wrt_v
    real sum;      // holds sum_k v(i,k) * x[k]
    real grad_tmp; // holds sum_l grad_C_wrt_f[l] * w(l,i)
    for (int i = 0; i < nhidden; ++i)
    {
        sum = 0;
        for (int k = 0; k < n; ++k)
            sum += v(i,k) * input_units[k];

        grad_tmp = 0;
        for (int l = 0; l < nconstraints; ++l)
            grad_tmp += grad_C_real_wrt_f_x[l] * w(l,i);

        for (int j = 0; j < n; ++j)
            grad_H_f_x_wrt_v(i,j) = grad_tmp * (1 - tanh(bias_hidden[i] + sum) * tanh(bias_hidden[i] + sum)) * input_units[j];

        grad_H_f_x_wrt_bias_hidden[i] = grad_tmp * (1 - tanh(bias_hidden[i] + sum) * tanh(bias_hidden[i] + sum));
    }
}
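//! Gradient step on v, w and bias_hidden using the gradients of the extra
//! ("derivative") cost.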
void EntropyContrast::update_NNcontinuous_from_extra_cost()
{
    //TODO: maybe change the learning_rate used for the extra_cost

    for (int i = 0; i < nhidden; ++i) {
        for (int j = 0; j < n; ++j) {
            v(i,j) -= learning_rate * grad_extra_wrt_v(i,j);
        }
    }

    for (int i = 0; i < nconstraints; ++i) {
        for (int j = 0; j < nhidden; ++j) {
            w(i,j) -= learning_rate * grad_extra_wrt_w(i,j);
        }
    }

    for (int j = 0; j < nhidden; ++j) {
        bias_hidden[j] -= learning_rate * grad_extra_wrt_bias_hidden[j];
    }
}
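//! Contrastive update of all parameters: move along the difference between
//! the gradients obtained on real data and on generated data, with weight
//! decay on v and w.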
void EntropyContrast::update_NNcontinuous()
{
    for (int i = 0; i < nhidden; ++i)
        for (int j = 0; j < n; ++j)
            v(i,j) -= learning_rate * (grad_H_f_x_wrt_v(i,j) - grad_H_f_x_hat_wrt_v(i,j)) + weight_decay_hidden * v(i,j);

    for (int i = 0; i < nconstraints; ++i)
        for (int j = 0; j < nhidden; ++j)
            w(i,j) -= learning_rate * (grad_H_f_x_wrt_w(i,j) - grad_H_f_x_hat_wrt_w(i,j)) + weight_decay_output * w(i,j);

    for (int j = 0; j < nhidden; ++j)
        bias_hidden[j] -= learning_rate * (grad_H_f_x_wrt_bias_hidden[j] - grad_H_f_x_hat_wrt_bias_hidden[j]);

    for (int j = 0; j < nconstraints; ++j)
        bias_output[j] -= learning_rate * (grad_H_f_x_wrt_bias_output[j] - grad_H_f_x_hat_wrt_bias_output[j]);
}

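//! For cost_extra == "derivative": gradient with respect to the rows g_j of
//! df_dx of the cost sum over i < j of (g_i . g_j)^2 / (|g_i|^2 * |g_j|^2),
//! which penalizes constraint gradients that point in the same direction.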
void EntropyContrast::compute_extra_grad_wrt_df_dx(Mat& grad_C_wrt_df_dx)
{
    for (int i = 0; i < n; i++) {
        grad_C_wrt_df_dx[0][i] = 0.0;
    }

    // compute the dot products g_i . g_j
    Mat dot_g(nconstraints, nconstraints);
    for (int i = 0; i < nconstraints; ++i) {
        for (int j = 0; j < i; ++j) {
            dot_g(i,j) = dot(df_dx(i), df_dx(j));
        }
    }

    Vec cost(nconstraints);
    Vec d(nconstraints);

    for (int i = 1; i < nconstraints; ++i) {
        cost[i] = 0;
        d[i] = 0;
        real sum = 0;
        for (int j = 0; j < i; ++j) {
            d[i] += pownorm(df_dx(i)) * pownorm(df_dx(j));
            sum += square(dot_g(i,j));
        }
        cost[i] += sum / d[i];
    }

    for (int j = 1; j < nconstraints; ++j) {
        for (int k = 0; k < n; ++k) {
            grad_C_wrt_df_dx(j,k) = 0;
            for (int i = 0; i < j; ++i) {
                grad_C_wrt_df_dx(j,k) += 2 * dot_g(j,i) * df_dx(i,k);
            }
            grad_C_wrt_df_dx(j,k) /= d[j];

            grad_C_wrt_df_dx(j,k) -= 2 * cost[j] * df_dx(j,k) / norm(df_dx(j));
        }
    }
}

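//! Backpropagate grad_C_wrt_df_dx (the gradient of the extra cost with
//! respect to the Jacobian df/dx) to the parameters, filling
//! grad_extra_wrt_w, grad_extra_wrt_v and grad_extra_wrt_bias_hidden.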
void EntropyContrast::set_NNcontinuous_gradient_from_extra_cost(Mat& grad_C_wrt_df_dx, const Vec& input)
{
    // compute a = diag(1 - tanh^2(v x + bias_hidden))
    //         b = 1 - tanh(v x + bias_hidden)
    Vec ones(nhidden);
    Vec b(nhidden);
    ones.fill(1);
    Vec hidden(nhidden);
    hidden = product(v, input);
    hidden = hidden + bias_hidden;
    Vec diag(nhidden);
    diag = ones - square(tanh(hidden));

    b = ones - tanh(hidden);

    Mat a(nhidden, nhidden);
    a.fill(0.0);
    addToDiagonal(a, diag);

    // compute dC/dw = dC/dg * v' * a
    Mat temp(nconstraints, nhidden);
    productTranspose(temp, grad_C_wrt_df_dx, v);
    product(grad_extra_wrt_w, temp, a);

    // compute dC/dv = a * w' * dC/dg - 2 * (dC/da * b * a) x'
    {
        Mat tmp(nhidden, nconstraints);
        product(tmp, a, transpose(w));
        product(grad_extra_wrt_v, tmp, grad_C_wrt_df_dx);
    }

    // compute dC/da
    {
        Mat tmp(nhidden, n);
        product(tmp, transpose(w), grad_C_wrt_df_dx);
        Mat tmp_a(nhidden, nhidden);
        product(tmp_a, tmp, transpose(v));

        // grad_extra_wrt_v += (-2 * diag(tmp_a) * b * a) * transpose(input)
        Vec temp(nhidden);
        for (int i = 0; i < nhidden; ++i)
        {
            temp[i] = -2 * tmp_a(i,i) * b[i] * a(i,i);

            for (int j = 0; j < n; ++j)
            {
                grad_extra_wrt_v(i,j) += temp[i] * input[j];
            }
        }
        grad_extra_wrt_bias_hidden = temp;
    }
}



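//! Resize all parameters, gradients and running statistics according to the
//! train set: n is the input dimension (train_set->width()) and the field
//! named 'inputsize' is the number of training examples (train_set->length()).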
void EntropyContrast::build_()
{
    if (!train_set.isNull())
    {
        n = train_set->width(); // the input dimension

        inputsize = train_set->length(); // the number of training examples

        x.resize(n); // the current input sample

        f_x.resize(nconstraints); // the constraints on the real sample

        grad_C_real_wrt_f_x.resize(nconstraints); // the gradient of the real cost wrt the constraints

        x_hat.resize(n); // the current generated sample

        f_x_hat.resize(nconstraints); // the constraints on the generated sample

        grad_C_generated_wrt_f_x_hat.resize(nconstraints); // the gradient of the generated cost wrt the constraints

        grad_C_extra_cost_wrt_f_x.resize(nconstraints);

        starting_learning_rate = learning_rate;

        n_seen_examples = 0;

        w.resize(nconstraints, nhidden); // the weights between the hidden layer and the output (the constraints)

        z_x.resize(nhidden);     // the hidden units for the real sample
        z_x_hat.resize(nhidden); // the hidden units for the generated sample

        v.resize(nhidden, n); // the weights between the input and the hidden units

        mu_f.resize(nconstraints);     // the running mean of the constraints on real data, used in some gradient computations
        mu_f_hat.resize(nconstraints); // the running mean of the constraints on generated data

        sigma_f.resize(nconstraints);     // the running variance of the constraints on real data
        sigma_f_hat.resize(nconstraints); // the running variance of the constraints on generated data

        mu_f_square.resize(nconstraints);
        sigma_f_square.resize(nconstraints);

        bias_hidden.resize(nhidden);
        bias_output.resize(nconstraints);

        grad_H_f_x_wrt_bias_output.resize(nconstraints);
        grad_H_f_x_wrt_bias_hidden.resize(nhidden);

        grad_H_f_x_hat_wrt_bias_output.resize(nconstraints);
        grad_H_f_x_hat_wrt_bias_hidden.resize(nhidden);

        grad_H_f_x_hat_wrt_w.resize(nconstraints, nhidden);
        grad_H_f_x_wrt_w.resize(nconstraints, nhidden);

        grad_H_g_wrt_w.resize(nconstraints, nhidden);

        grad_H_f_x_wrt_v.resize(nhidden, n);
        grad_H_f_x_hat_wrt_v.resize(nhidden, n);

        // used for the computation of the extra diversity constraints
        sigma_g.resize(nconstraints);
        mu_g.resize(nconstraints);
        g_x.resize(nconstraints);
        grad_C_wrt_df_dx.resize(nconstraints, n);
        df_dx.resize(nconstraints, n);

        grad_extra_wrt_w.resize(nconstraints, nhidden);
        grad_extra_wrt_v.resize(nhidden, n);

        full_sum.resize(nconstraints);
    }
}

// ### Nothing to add here, simply calls build_
void EntropyContrast::build()
{
    inherited::build();
    build_();
}


void EntropyContrast::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    //    deepCopyField(eigenvecs, copies);
}



int EntropyContrast::outputsize() const
{
    return nconstraints;
}

void EntropyContrast::forget()
{
    // (Re)initialize the network parameters and the running statistics
    initialize_NNcontinuous();
}

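//! Training loop: one pass over the training set per stage. For each sample,
//! compute the constraints and their running statistics, get the cost
//! gradient on the real sample, then on a generated noise sample, and update
//! the parameters with the contrast of the two (plus any extra cost).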
void EntropyContrast::train()
{
    int t;
    //    manual_seed(12345678);
    forget();
    real cost;
    Vec save(n);
    for (; stage < nstages; stage++)
    {
        cost = 0;
        cout << getInfo() << endl;
        cout << "Stage = " << stage << endl;
        cout << "Learning rate = " << learning_rate << endl;

        for (t = 0; t < train_set.length(); ++t)
        {
            update_alpha(stage, t); // used in the update of the running averages

            train_set->getRow(t, x);

            // Real data section

            // Get constraint output for real data (fill the f_x field);
            // this also computes the hidden units z_x, which are needed below
            // when we compute the gradients
            get_NNcontinuous_output(x, f_x, z_x);

            update_mu_sigma_f(f_x, mu_f, sigma_f);

            if (cost_real == "constraint_variance") {
                update_mu_sigma_f(square(f_x), mu_f_square, sigma_f_square);
            }

            // Get the gradient of the cost function for real data (fill grad_C_real_wrt_f_x)
            if (cost_real == "constraint_variance") {
                // compute the gradient of the cost wrt f_x
                get_grad_log_variance_wrt_f(grad_C_real_wrt_f_x, f_x, mu_f, sigma_f);
            }

            // Adjust the weight of the gradient
            grad_C_real_wrt_f_x *= weight_real;

            // Extra cost function
            if (cost_extra == "variance_sum_square_constraints") {
                // this also computes the gradient of the extra cost wrt the constraints f_i(x)
                compute_diversity_cost(f_x, g_x, grad_C_extra_cost_wrt_f_x);

                grad_C_extra_cost_wrt_f_x *= weight_extra;
            }

            if (cost_extra == "derivative") {
                compute_df_dx(df_dx, x);
                compute_extra_grad_wrt_df_dx(grad_C_wrt_df_dx);

                grad_C_wrt_df_dx *= weight_extra;
            }

            if (cost_extra == "variance_sum_square_constraints") {
                // combine the real gradient with the extra gradient
                // (this must happen before the gradient is backpropagated below)
                for (int it = 0; it < grad_C_real_wrt_f_x.length(); it++) {
                    grad_C_real_wrt_f_x[it] += grad_C_extra_cost_wrt_f_x[it];
                }
            }

            // Set the gradient for the constraints using real data:
            // the gradient of the cost wrt the weights w, v and the biases
            set_NNcontinuous_gradient(grad_C_real_wrt_f_x, grad_H_f_x_wrt_w, grad_H_f_x_wrt_v, z_x, x,
                                      grad_H_f_x_wrt_bias_hidden, grad_H_f_x_wrt_bias_output);

            if (cost_extra == "derivative") {
                set_NNcontinuous_gradient_from_extra_cost(grad_C_wrt_df_dx, x);
            }

            // Generated data section

            // Generate a new point (fill x_hat)
            if (gen_method == "N(0,1)") {
                gen_normal_0_1(x_hat);
            }

            // Get the constraint output for generated data (fill the f_x_hat field)
            get_NNcontinuous_output(x_hat, f_x_hat, z_x_hat);
            update_mu_sigma_f(f_x_hat, mu_f_hat, sigma_f_hat);

            // Get the gradient of the cost function for generated data (fill grad_C_generated_wrt_f_x_hat)
            if (cost_gen == "constraint_variance") {
                get_grad_log_variance_wrt_f(grad_C_generated_wrt_f_x_hat, f_x_hat, mu_f_hat, sigma_f_hat);
            }

            // Adjust the weight of the gradient
            grad_C_generated_wrt_f_x_hat *= weight_gen;

            // Set the gradient for the constraints using generated data
            set_NNcontinuous_gradient(grad_C_generated_wrt_f_x_hat, grad_H_f_x_hat_wrt_w, grad_H_f_x_hat_wrt_v, z_x_hat, x_hat,
                                      grad_H_f_x_hat_wrt_bias_hidden, grad_H_f_x_hat_wrt_bias_output);

            // Update
            update_NNcontinuous();
            if (cost_extra == "derivative") {
                update_NNcontinuous_from_extra_cost();
            }
            n_seen_examples++;

            // running covariance between the squared first two constraints
            full = alpha * full + (1-alpha) * (f_x[0] * f_x[0] - (sigma_f[0] + mu_f[0]*mu_f[0])) * (f_x[1] * f_x[1] - (sigma_f[1] + mu_f[1]*mu_f[1]));

            real den = 0;
            real num = 0;
            for (int i = 0; i < nconstraints; ++i) {
                for (int j = 0; j < i; ++j) {
                    den += pownorm(df_dx(i)) * pownorm(df_dx(j));
                    num += square(dot(df_dx(i), df_dx(j)));
                }
            }
            if (den > 0) // df_dx is only filled when cost_extra == "derivative"
                cost += num / den;
        }

        learning_rate = starting_learning_rate / (1 + decay_factor * n_seen_examples);

        // Train evaluation
        cout << "cov = " << full / train_set.length() << endl;
        cout << "var f_square: " << sigma_f_square[0] << " " << sigma_f_square[1] << endl;
        // correlation = cov / (std(f_0^2) * std(f_1^2))
        cout << "corr: " << full / sqrt(sigma_f_square[0] * sigma_f_square[1]) << endl;
        cout << "f : " << f_x << endl;
        cout << "cost: " << cost << endl;

        train_set->getRow(0, x);
        compute_df_dx(df_dx, x);

        cout << "angle: " << (dot(df_dx(0), df_dx(1)) / (norm(df_dx(1)) * norm(df_dx(0)))) << endl;

//        cout << "df/dx: " << df_dx(0) << endl;

        save << df_dx(0);
        cout << "--------------------------------" << endl;

        /*
          ostringstream sss;
          sss << t;
          string sstage = sss.str();
          ofstream file1((string("gen1_")+sstage+".dat").c_str());
          ofstream file2(("gen2_"+sstage+".dat").c_str());
          ofstream file3(("gen3_"+sstage+".dat").c_str());

          for(int t=0 ; t<train_set.length() ; ++t) {
          train_set->getRow(t,x);

          compute_df_dx(df_dx,x);

          file1 << x << " " << df_dx(0) << endl;
          file2 << x << " " << df_dx(1) << endl;
          file3 << x << " " << dot(df_dx(0),df_dx(1))/(pownorm(df_dx(0))*pownorm(df_dx(1))) << endl;

          }

          file1.close();
          file2.close();
          file3.close();
        */

    }

    /*
      FILE * f1 = fopen("gen1.dat","wt") ;
      FILE * f2 = fopen("gen2.dat","wt") ;
      FILE * f3 = fopen("gen3.dat","wt") ;

      for (int i = -10 ; i <= 10 ; i+=2) {
      for (int j = -1 ; j <= 9 ; j+=2 ) {
      for (int k = -1 ; k <= 9 ; k+=3 ) {
      Mat res(2,3) ;
      Vec input(3) ;
      Vec ones(nhidden) ;
      ones.fill(1) ;
      input[0] = (real)i / 10 ;
      input[1] = (real)j / 10 ;
      input[2] = (real)k / 100 ;
      Vec hidden(nhidden);
      hidden = product(v,input) ;
      Vec diag(nhidden) ;
      diag = ones - square(tanh(hidden)) ;
      diagonalizedFactorsProduct(res,w,diag,v);
      fprintf(f1,"%f %f %f %f %f %f\n",(real)i/10,(real)j/10,(real)k/100,res(0,0),res(0,1),res(0,2));
      fprintf(f2,"%f %f %f %f %f %f\n",(real)i/10,(real)j/10,(real)k/100,res(1,0),res(1,1),res(1,2));
      real norm0 = sqrt(res(0,0)*res(0,0)+res(0,1)*res(0,1)+res(0,2)*res(0,2)) ;
      real norm1 = sqrt(res(1,0)*res(1,0)+res(1,1)*res(1,1)+res(1,2)*res(1,2)) ;
      real angle = res(0,0) / norm0 * res(1,0) / norm1 + res(0,1) / norm0 * res(1,1) / norm1 + res(0,2) / norm0 * res(1,2) / norm1 ;
      fprintf(f3,"%f %f %f %f\n",(real)i/10,(real)j/10,(real)k/100,angle) ;
      //                fprintf(f2,"%f %f %f %f\n",(real)i/10,(real)j/10,res(1,0),res(1,1)) ;
      }
      }
      }

      fclose(f1) ;
      fclose(f2) ;
      fclose(f3) ;

    */

}

void EntropyContrast::computeOutput(const Vec& input, Vec& output) const
{
    // not implemented
}


void EntropyContrast::reconstruct(const Vec& output, Vec& input) const
{
    // not implemented
}

void EntropyContrast::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                              const Vec& target, Vec& costs) const
{
    // not implemented
}

TVec<string> EntropyContrast::getTestCostNames() const
{
    return TVec<string>(1, "squared_reconstruction_error");
}

TVec<string> EntropyContrast::getTrainCostNames() const
{
    return TVec<string>();
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :