// -*- C++ -*-

// BinarizeModule.cc
//
// Copyright (C) 2007 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Yoshua Bengio

#include "BinarizeModule.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    BinarizeModule,
    "Map probabilities in (0,1) to a bit in {0,1}, by sampling or hard threshold\n",
    "Map input probabilities in (0,1) to a bit in {0,1}, either according to\n"
    "a hard threshold (>= 0.5), or by sampling, and ALLOW GRADIENTS\n"
    "TO PROPAGATE BACKWARDS. The heuristic for gradient propagation is the following:\n"
    "  If the output is incorrect (the sign of the gradient pushes it towards the other value)\n"
    "  then propagate the gradient as is,\n"
    "  else do not propagate any gradient.\n"
    );

////////////////////
// BinarizeModule //
////////////////////
BinarizeModule::BinarizeModule()
    : stochastic(true),
      copy_gradients(false),
      saturate_gradients(false)
{
}

////////////////////
// declareOptions //
////////////////////
void BinarizeModule::declareOptions(OptionList& ol)
{
    declareOption(ol, "stochastic", &BinarizeModule::stochastic,
                  OptionBase::buildoption,
                  "If true then sample the output bits stochastically, else use a hard threshold.\n");

    declareOption(ol, "copy_gradients", &BinarizeModule::copy_gradients,
                  OptionBase::buildoption,
                  "If true then simply copy the gradients through with no alteration.\n");

    declareOption(ol, "saturate_gradients", &BinarizeModule::saturate_gradients,
                  OptionBase::buildoption,
                  "If true then multiply output gradients by p(1-p) for input probability p.\n");

    inherited::declareOptions(ol);
}

////////////
// build_ //
////////////
void BinarizeModule::build_()
{
}

///////////
// build //
///////////
void BinarizeModule::build()
{
    inherited::build();
    build_();
}

////////////////////
// bpropAccUpdate //
////////////////////
void BinarizeModule::bpropAccUpdate(const TVec<Mat*>& ports_value,
                                    const TVec<Mat*>& ports_gradient)
{
    PLASSERT( ports_value.length() == nPorts() &&
              ports_gradient.length() == nPorts() );

    Mat* input = ports_value[0];
    Mat* output = ports_value[1];
    Mat* input_gradient = ports_gradient[0];
    Mat* output_gradient = ports_gradient[1];

    int mbs = output->length();
    if (input_gradient)
    {
        input_gradient->resize(mbs, output->width());
        for (int t=0; t<mbs; t++)
        {
            real* yt = (*output)[t];
            real* dyt = (*output_gradient)[t];
            real* dxt = (*input_gradient)[t];
            real* xt = (*input)[t];
            if (copy_gradients)
                // Pass the output gradient through unchanged.
                for (int i=0; i<output->width(); i++)
                    dxt[i] += dyt[i];
            else if (saturate_gradients)
                // Scale the output gradient by p(1-p) for input probability p.
                for (int i=0; i<output->width(); i++)
                    dxt[i] += dyt[i]*xt[i]*(1-xt[i]);
            else
                // Default heuristic: propagate the gradient only when it
                // pushes the binarized output towards the other value.
                for (int i=0; i<output->width(); i++)
                    if ((yt[i]-0.5)*dyt[i] > 0)
                        dxt[i] += dyt[i];
        }
    }
}

//////////////////////
// bpropDoesNothing //
//////////////////////
/* THIS METHOD IS OPTIONAL
// the default implementation returns false
bool BinarizeModule::bpropDoesNothing()
{
}
*/

//////////////
// finalize //
//////////////
/* THIS METHOD IS OPTIONAL
void BinarizeModule::finalize()
{
}
*/

////////////
// forget //
////////////
void BinarizeModule::forget()
{
}

///////////
// fprop //
///////////
void BinarizeModule::fprop(const TVec<Mat*>& ports_value)
{
    PLASSERT( ports_value.length() == nPorts() );
    // Check which ports are input  (ports_value[i] && !ports_value[i]->isEmpty()),
    // which ports are output       (ports_value[i] &&  ports_value[i]->isEmpty()),
    // and which ports are ignored  (!ports_value[i]).
    // If that combination of (input, output, ignored) is feasible for this
    // class then perform the corresponding computation, otherwise raise an
    // error. See the comment in the header file for more information.

    PLASSERT_MSG( random_gen,
                  "random_gen should be initialized before generating samples" );

    Mat* input = ports_value[0];
    Mat* output = ports_value[1];
    int mbs = input->length();
    output->resize(mbs, input->width());
    for (int t=0; t<mbs; t++)
    {
        real* xt = (*input)[t];
        real* yt = (*output)[t];
        int w = input->width();
        if (stochastic)
            // Sample each output bit from a Bernoulli with probability xt[i].
            for (int i=0; i<w; i++)
                yt[i] = random_gen->binomial_sample(xt[i]);
        else
            // Hard threshold at 0.5.
            for (int i=0; i<w; i++)
                yt[i] = xt[i] >= 0.5 ? 1 : 0;
    }
}

//////////////////
// getPortIndex //
//////////////////
/* Optional
int BinarizeModule::getPortIndex(const string& port)
{}
*/

//////////////
// getPorts //
//////////////
const TVec<string>& BinarizeModule::getPorts() {
    return inherited::getPorts();
}

//////////////////
// getPortSizes //
//////////////////
const TMat<int>& BinarizeModule::getPortSizes() {
    port_sizes.resize(2, 2);
    port_sizes.fill(-1);
    return port_sizes;
}

//////////////////////////////////
// makeDeepCopyFromShallowCopy //
//////////////////////////////////
void BinarizeModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### Remove this line when you have fully implemented this method.
    PLERROR("BinarizeModule::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}

/////////////////////
// setLearningRate //
/////////////////////
/* OPTIONAL
// The default implementation raises a warning and does not do anything.
void BinarizeModule::setLearningRate(real dynamic_learning_rate)
{
}
*/


}
// end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
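/*
  Illustrative sketch (not part of the original file): a minimal, standalone
  re-statement in plain standard C++ of the forward binarization and the
  backward gradient-propagation heuristic documented in the class description
  above, so the behaviour can be inspected outside of PLearn. The names
  binarize_example, probs, out_grad and the use of std::mt19937 /
  std::bernoulli_distribution are assumptions made only for this example; the
  real module operates on PLearn Mat ports and uses its own random_gen.
*/
#include <cstdio>
#include <random>
#include <vector>

int binarize_example()
{
    std::vector<double> probs    = {0.9, 0.2, 0.6};    // input probabilities p in (0,1)
    std::vector<double> out_grad = {+1.0, -0.5, -2.0}; // dC/dy arriving on the output port
    std::vector<double> bits(probs.size()), in_grad(probs.size(), 0.0);

    std::mt19937 rng(1827);
    for (std::size_t i = 0; i < probs.size(); ++i) {
        // Forward pass, "stochastic" variant: sample y ~ Bernoulli(p).
        // For the hard-threshold variant, replace this line by
        //   bits[i] = probs[i] >= 0.5 ? 1.0 : 0.0;
        bits[i] = std::bernoulli_distribution(probs[i])(rng) ? 1.0 : 0.0;

        // Backward heuristic (default options): propagate dC/dy only when it
        // pushes the bit towards the other value, i.e. (y - 0.5) * dC/dy > 0.
        if ((bits[i] - 0.5) * out_grad[i] > 0)
            in_grad[i] += out_grad[i];
    }

    for (std::size_t i = 0; i < probs.size(); ++i)
        std::printf("p=%.2f  bit=%g  dC/dy=%+.2f  dC/dx=%+.2f\n",
                    probs[i], bits[i], out_grad[i], in_grad[i]);
    return 0;
}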