diff options
author | Evan Shelhamer <shelhamer@imaginarynumber.net> | 2014-09-05 19:36:35 -0700 |
---|---|---|
committer | Evan Shelhamer <shelhamer@imaginarynumber.net> | 2014-09-07 03:34:53 +0200 |
commit | e922d119b2101ba40c7ab5e129260a6b0d7c8fe4 (patch) | |
tree | 7be7433c67d4baec61de37c7597fd00ea2315191 /include/caffe/vision_layers.hpp | |
parent | e05428f1af54324415723a92b3cc72907378e35f (diff) | |
download | caffeonacl-e922d119b2101ba40c7ab5e129260a6b0d7c8fe4.tar.gz caffeonacl-e922d119b2101ba40c7ab5e129260a6b0d7c8fe4.tar.bz2 caffeonacl-e922d119b2101ba40c7ab5e129260a6b0d7c8fe4.zip |
revert separate strategies: engines will extend the caffe standards
Diffstat (limited to 'include/caffe/vision_layers.hpp')
-rw-r--r-- | include/caffe/vision_layers.hpp | 66 |
1 file changed, 11 insertions, 55 deletions
diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 3143c33d..2dca2bf7 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -39,13 +39,13 @@ class ConvolutionLayer : public Layer<Dtype> { protected: virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, - vector<Blob<Dtype>*>* top) = 0; + vector<Blob<Dtype>*>* top); virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, - vector<Blob<Dtype>*>* top) = 0; + vector<Blob<Dtype>*>* top); virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, - const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) = 0; + const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom); virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, - const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) = 0; + const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom); int kernel_h_, kernel_w_; int stride_h_, stride_w_; @@ -57,28 +57,7 @@ class ConvolutionLayer : public Layer<Dtype> { int num_output_; int height_out_, width_out_; bool bias_term_; -}; - -/* CaffeConvolutionLayer -*/ -template <typename Dtype> -class CaffeConvolutionLayer : public ConvolutionLayer<Dtype> { - public: - explicit CaffeConvolutionLayer(const LayerParameter& param) - : ConvolutionLayer<Dtype>(param) {} - virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, - vector<Blob<Dtype>*>* top); - - protected: - virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, - vector<Blob<Dtype>*>* top); - virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, - vector<Blob<Dtype>*>* top); - virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, - const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom); - virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, - const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom); - + // For the Caffe matrix multiplication convolution. 
int M_, K_, N_; Blob<Dtype> col_buffer_; Blob<Dtype> bias_multiplier_; @@ -217,35 +196,6 @@ class PoolingLayer : public Layer<Dtype> { virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_POOLING; } - - protected: - virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, - vector<Blob<Dtype>*>* top) = 0; - virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, - vector<Blob<Dtype>*>* top) = 0; - virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, - const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) = 0; - virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, - const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) = 0; - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int pad_h_, pad_w_; - int channels_; - int height_, width_; - int pooled_height_, pooled_width_; -}; - -/* CaffePoolingLayer -*/ -template <typename Dtype> -class CaffePoolingLayer : public PoolingLayer<Dtype> { - public: - explicit CaffePoolingLayer(const LayerParameter& param) - : PoolingLayer<Dtype>(param) {} - virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, - vector<Blob<Dtype>*>* top); - virtual inline int ExactNumBottomBlobs() const { return 1; } virtual inline int MinTopBlobs() const { return 1; } // MAX POOL layers can output an extra top blob for the mask; @@ -265,6 +215,12 @@ class CaffePoolingLayer : public PoolingLayer<Dtype> { virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom); + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int pad_h_, pad_w_; + int channels_; + int height_, width_; + int pooled_height_, pooled_width_; Blob<Dtype> rand_idx_; Blob<int> max_idx_; }; |