// -*- C++ -*-

// ScaleGradientModule.cc
//
// Copyright (C) 2008 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#include "ScaleGradientModule.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    ScaleGradientModule,
    "Scales (or suppresses) the gradient that is backpropagated.",
    ""
);

/////////////////////////
// ScaleGradientModule //
/////////////////////////
ScaleGradientModule::ScaleGradientModule():
    scale(0)
{
}

////////////////////
// declareOptions //
////////////////////
void ScaleGradientModule::declareOptions(OptionList& ol)
{
    declareOption(ol, "scale", &ScaleGradientModule::scale,
                  OptionBase::buildoption,
                  "The scaling factor. If 0, no gradient will be backpropagated."
                  );

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

////////////
// build_ //
////////////
void ScaleGradientModule::build_()
{
}

///////////
// build //
///////////
void ScaleGradientModule::build()
{
    inherited::build();
    build_();
}

////////////
// forget //
////////////
void ScaleGradientModule::forget()
{
}

///////////
// fprop //
///////////
void ScaleGradientModule::fprop(const Mat& inputs, Mat& outputs)
{
    // The forward pass is the identity: just copy the inputs.
    outputs.resize(inputs.length(), inputs.width());
    outputs << inputs;
}

void ScaleGradientModule::fprop(const Vec& input, Vec& output) const
{
    output.resize(input.size());
    output << input;
}

/////////////////
// bpropUpdate //
/////////////////
void ScaleGradientModule::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                      Mat& input_gradients,
                                      const Mat& output_gradients,
                                      bool accumulate)
{
    if (accumulate)
    {
        PLASSERT_MSG( input_gradients.length() == output_gradients.length()
                      && input_gradients.width() == output_gradients.width(),
                      "Cannot accumulate into input_gradients and resize it" );

        if (scale == 0)
            return;
        else // input_gradients += scale * output_gradients
            multiplyAcc(input_gradients, output_gradients, scale);
    }
    else
    {
        input_gradients.resize(output_gradients.length(),
                               output_gradients.width());

        if (scale == 0)
            input_gradients.clear();
        else // input_gradients = scale * output_gradients
            multiply(input_gradients, output_gradients, scale);
    }
}

void ScaleGradientModule::bpropUpdate(const Vec& input, const Vec& output,
                                      Vec& input_gradient,
                                      const Vec& output_gradient,
                                      bool accumulate)
{
    if (accumulate)
    {
        PLASSERT_MSG( input_gradient.size() == output_gradient.size(),
                      "Cannot accumulate into input_gradient and resize it" );
        if (scale == 0)
            return;
        else // input_gradient += scale * output_gradient
            multiplyAcc(input_gradient, output_gradient, scale);
    }
    else
    {
        input_gradient.resize(output_gradient.size());
        if (scale == 0)
            input_gradient.clear();
        else // input_gradient = scale * output_gradient
            multiply(output_gradient, scale, input_gradient);
    }
}

void ScaleGradientModule::setLearningRate(real the_learning_rate)
{
    // This module has no parameters to update, so there is nothing to do.
}

//////////////////////////////////
// makeDeepCopyFromShallowCopy //
//////////////////////////////////
void ScaleGradientModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

}
// end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :
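
// ---------------------------------------------------------------------------
// Illustrative usage (editor's sketch, not part of the original file): how
// this module behaves at train time. The forward pass is the identity, and
// bpropUpdate multiplies the incoming gradient by `scale` (or zeroes it when
// scale == 0), which lets you damp or block backpropagation through a branch
// of a network. The values and sizes below are made up for illustration, and
// input_size/output_size are assumed to be inherited OnlineLearningModule
// build options.
//
//     ScaleGradientModule module;
//     module.scale = 0.5;                           // halve gradients
//     module.input_size = module.output_size = 3;   // assumed inherited options
//     module.build();
//
//     Vec input(3), output(3);
//     input.fill(1.0);
//     module.fprop(input, output);                  // output == input
//
//     Vec output_grad(3), input_grad;               // input_grad resized inside
//     output_grad.fill(4.0);
//     module.bpropUpdate(input, output, input_grad, output_grad, false);
//     // input_grad is now [2, 2, 2]; with scale == 0 it would be all zeros,
//     // and with accumulate == true the result would be added into input_grad.
// ---------------------------------------------------------------------------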