summary refs log tree commit diff
path: root/src/caffe/layers/lrn_layer.cu
diff options
context:
space:
mode:
Diffstat (limited to 'src/caffe/layers/lrn_layer.cu')
-rw-r--r--  src/caffe/layers/lrn_layer.cu  12
1 file changed, 6 insertions, 6 deletions
diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu
index eee12e66..d6cb23bf 100644
--- a/src/caffe/layers/lrn_layer.cu
+++ b/src/caffe/layers/lrn_layer.cu
@@ -54,16 +54,17 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* in,
template <typename Dtype>
-Dtype LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
- return CrossChannelForward_gpu(bottom, top);
+ CrossChannelForward_gpu(bottom, top);
+ break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
- return WithinChannelForward(bottom, top);
+ WithinChannelForward(bottom, top);
+ break;
default:
LOG(FATAL) << "Unknown normalization region.";
- return Dtype(0);
}
}
@@ -77,7 +78,7 @@ __global__ void LRNComputeOutput(const int nthreads, const Dtype* in,
}
template <typename Dtype>
-Dtype LRNLayer<Dtype>::CrossChannelForward_gpu(
+void LRNLayer<Dtype>::CrossChannelForward_gpu(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
// First, compute scale
const Dtype* bottom_data = bottom[0]->gpu_data();
@@ -96,7 +97,6 @@ Dtype LRNLayer<Dtype>::CrossChannelForward_gpu(
LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
n_threads, bottom_data, scale_data, -beta_, top_data);
CUDA_POST_KERNEL_CHECK;
- return Dtype(0.);
}