author    Mohamed Omran <mohamed.omran@gmail.com>  2015-11-26 01:46:42 +0100
committer Mohamed Omran <mohamed.omran@gmail.com>  2015-12-04 11:07:42 +0100
commit    a6681945be4736a584adadfaf2bffe43ad31422e (patch)
tree      2403760ba8959122867a83f525634cafd06dcb62 /include
parent    7e40583db034fc292de3a3a09ea08a2861dd288c (diff)
ELU layer with basic tests
Diffstat (limited to 'include')
-rw-r--r--  include/caffe/layers/elu_layer.hpp  |  86
1 file changed, 86 insertions(+), 0 deletions(-)
diff --git a/include/caffe/layers/elu_layer.hpp b/include/caffe/layers/elu_layer.hpp
new file mode 100644
index 00000000..0796e898
--- /dev/null
+++ b/include/caffe/layers/elu_layer.hpp
@@ -0,0 +1,86 @@
+#ifndef CAFFE_ELU_LAYER_HPP_
+#define CAFFE_ELU_LAYER_HPP_
+
+#include <vector>
+
+#include "caffe/blob.hpp"
+#include "caffe/layer.hpp"
+#include "caffe/proto/caffe.pb.h"
+
+#include "caffe/layers/neuron_layer.hpp"
+
+namespace caffe {
+
+/**
+ * @brief Exponential Linear Unit non-linearity @f$
+ * y = \left\{
+ * \begin{array}{lr}
+ * x & \mathrm{if} \; x > 0 \\
+ * \alpha (\exp(x)-1) & \mathrm{if} \; x \le 0
+ * \end{array} \right.
+ * @f$.
+ */
+template <typename Dtype>
+class ELULayer : public NeuronLayer<Dtype> {
+ public:
+ /**
+ * @param param provides ELUParameter elu_param,
+ * with ELULayer options:
+ * - alpha (\b optional, default 1).
+ * the value @f$ \alpha @f$ that controls saturation for negative inputs.
+ */
+ explicit ELULayer(const LayerParameter& param)
+ : NeuronLayer<Dtype>(param) {}
+
+ virtual inline const char* type() const { return "ELU"; }
+
+ protected:
+ /**
+ * @param bottom input Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the inputs @f$ x @f$
+ * @param top output Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the computed outputs @f$
+ * y = \left\{
+ * \begin{array}{lr}
+ * x & \mathrm{if} \; x > 0 \\
+ * \alpha (\exp(x)-1) & \mathrm{if} \; x \le 0
+ * \end{array} \right.
+ * @f$.
+ */
+ virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top);
+ virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top);
+
+ /**
+ * @brief Computes the error gradient w.r.t. the ELU inputs.
+ *
+ * @param top output Blob vector (length 1), providing the error gradient with
+ * respect to the outputs
+ * -# @f$ (N \times C \times H \times W) @f$
+ * containing error gradients @f$ \frac{\partial E}{\partial y} @f$
+ * with respect to computed outputs @f$ y @f$
+ * @param propagate_down see Layer::Backward.
+ * @param bottom input Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the inputs @f$ x @f$; Backward fills their diff with
+ * gradients @f$
+ * \frac{\partial E}{\partial x} = \left\{
+ * \begin{array}{lr}
+ * \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0 \\
+ * (y + \alpha) \frac{\partial E}{\partial y} & \mathrm{if} \; x \le 0
+ * \end{array} \right.
+ * @f$ if propagate_down[0].
+ */
+ virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+ virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+};
+
+
+} // namespace caffe
+
+#endif // CAFFE_ELU_LAYER_HPP_
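Because this diff is limited to 'include', the accompanying src/caffe/layers/elu_layer.cpp is not shown. What follows is a minimal sketch, not the committed implementation, of what the CPU forward and backward passes could look like given the formulas documented in the header. It assumes the ELUParameter/elu_param accessor mentioned in the header docs, and uses Caffe's standard INSTANTIATE_CLASS and REGISTER_LAYER_CLASS registration macros.

#include <algorithm>
#include <cmath>
#include <vector>

#include "caffe/layers/elu_layer.hpp"

namespace caffe {

// Hedged sketch: the actual src/caffe/layers/elu_layer.cpp is not part of
// this diff; this follows the formulas documented in the header above.
template <typename Dtype>
void ELULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  Dtype alpha = this->layer_param_.elu_param().alpha();
  for (int i = 0; i < count; ++i) {
    // y = x for x > 0, alpha * (exp(x) - 1) for x <= 0, written branch-free.
    top_data[i] = std::max(bottom_data[i], Dtype(0))
        + alpha * (exp(std::min(bottom_data[i], Dtype(0))) - Dtype(1));
  }
}

template <typename Dtype>
void ELULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* top_data = top[0]->cpu_data();
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  const int count = bottom[0]->count();
  Dtype alpha = this->layer_param_.elu_param().alpha();
  for (int i = 0; i < count; ++i) {
    // dy/dx = 1 for x > 0 and alpha * exp(x) = y + alpha for x <= 0,
    // chained with the top gradient dE/dy as documented above.
    bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
        + (alpha + top_data[i]) * (bottom_data[i] <= 0));
  }
}

INSTANTIATE_CLASS(ELULayer);
REGISTER_LAYER_CLASS(ELU);

}  // namespace caffe

Note that the backward pass reuses the forward output y to compute the negative-branch derivative as y + alpha, avoiding a second exp(x). A layer using this would be configured in a net prototxt roughly as follows (illustrative names):

layer {
  name: "elu1"
  type: "ELU"
  bottom: "conv1"
  top: "conv1"
  elu_param { alpha: 1.0 }
}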