author    Jeff Donahue <jeff.donahue@gmail.com>  2014-02-25 15:21:23 -0800
committer Evan Shelhamer <shelhamer@imaginarynumber.net>  2014-02-26 15:42:38 -0800
commit    22fa0a2945e58d3f748071034b54ec8610fc265a (patch)
tree      f0a642cb4b25a5d89e949453d4b59cda04ae9818 /src
parent    b065ae215a95cf9ec87dc51007b738977cd799bd (diff)
fix most linter errors
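
The diff below is a mechanical cpplint cleanup rather than a functional change: over-long lines are re-wrapped, stray semicolons after function bodies are dropped, C-style casts become static_cast/reinterpret_cast, sprintf becomes snprintf, third-party headers switch to the quoted include form and missing standard headers are added, using-directives are replaced with explicit using-declarations, namespace-closing braces gain "// namespace caffe" comments, and cpplint false positives on CUDA kernel launches and rand() calls are silenced with NOLINT_NEXTLINE comments. A minimal sketch of the cast cleanup applied throughout (illustrative code, not a line from this commit):

    #include <cstdlib>

    // cpplint flags "(int*)raw" (readability/casting); the named cast below
    // is the form this commit standardizes on.
    int* AsIntBuffer(void* raw) {
      return reinterpret_cast<int*>(raw);
    }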
Diffstat (limited to 'src')
src/caffe/common.cpp | 5
src/caffe/layers/bnll_layer.cu | 6
src/caffe/layers/conv_layer.cpp | 2
src/caffe/layers/data_layer.cpp | 6
src/caffe/layers/dropout_layer.cu | 13
src/caffe/layers/flatten_layer.cpp | 2
src/caffe/layers/im2col_layer.cpp | 13
src/caffe/layers/inner_product_layer.cpp | 2
src/caffe/layers/loss_layer.cu | 15
src/caffe/layers/lrn_layer.cpp | 2
src/caffe/layers/lrn_layer.cu | 5
src/caffe/layers/neuron_layer.cpp | 2
src/caffe/layers/pooling_layer.cpp | 2
src/caffe/layers/pooling_layer.cu | 17
src/caffe/layers/relu_layer.cu | 9
src/caffe/layers/sigmoid_layer.cu | 11
src/caffe/layers/softmax_layer.cu | 11
src/caffe/layers/softmax_loss_layer.cu | 10
src/caffe/layers/split_layer.cpp | 2
src/caffe/layers/tanh_layer.cu | 12
src/caffe/solver.cpp | 5
src/caffe/test/test_blob.cpp | 6
src/caffe/test/test_caffe_main.hpp | 6
src/caffe/test/test_common.cpp | 10
src/caffe/test/test_convolution_layer.cpp | 21
src/caffe/test/test_data_layer.cpp | 15
src/caffe/test/test_euclidean_loss_layer.cpp | 5
src/caffe/test/test_filler.cpp | 12
src/caffe/test/test_flatten_layer.cpp | 13
src/caffe/test/test_im2col_layer.cpp | 13
src/caffe/test/test_innerproduct_layer.cpp | 56
src/caffe/test/test_lrn_layer.cpp | 30
src/caffe/test/test_multinomial_logistic_loss_layer.cpp | 6
src/caffe/test/test_neuron_layer.cpp | 34
src/caffe/test/test_platform.cpp | 48
src/caffe/test/test_pooling_layer.cpp | 21
src/caffe/test/test_protobuf.cpp | 4
src/caffe/test/test_softmax_layer.cpp | 10
src/caffe/test/test_softmax_with_loss_layer.cpp | 6
src/caffe/test/test_split_layer.cpp | 11
src/caffe/test/test_stochastic_pooling.cpp (renamed from src/caffe/test/test_stochastic_pooing.cpp) | 18
src/caffe/test/test_syncedmem.cpp | 23
src/caffe/test/test_tanh_layer.cpp | 35
src/caffe/test/test_util_blas.cpp | 33
src/caffe/util/im2col.cpp | 47
src/caffe/util/im2col.cu | 59
src/caffe/util/io.cpp | 7
src/caffe/util/math_functions.cu | 2
48 files changed, 416 insertions(+), 287 deletions(-)
diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp
index 74985794..11096742 100644
--- a/src/caffe/common.cpp
+++ b/src/caffe/common.cpp
@@ -36,7 +36,8 @@ Caffe::Caffe()
}
// Try to create a vsl stream. This should almost always work, but we will
// check it anyway.
- if (vslNewStream(&vsl_stream_, VSL_BRNG_MT19937, cluster_seedgen()) != VSL_STATUS_OK) {
+ if (vslNewStream(&vsl_stream_, VSL_BRNG_MT19937,
+ cluster_seedgen()) != VSL_STATUS_OK) {
LOG(ERROR) << "Cannot create vsl stream. VSL random number generator "
<< "won't be available.";
}
@@ -48,7 +49,7 @@ Caffe::~Caffe() {
CURAND_CHECK(curandDestroyGenerator(curand_generator_));
}
if (vsl_stream_) VSL_CHECK(vslDeleteStream(&vsl_stream_));
-};
+}
void Caffe::set_random_seed(const unsigned int seed) {
// Curand seed
diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu
index 2c06a63d..c1795bcf 100644
--- a/src/caffe/layers/bnll_layer.cu
+++ b/src/caffe/layers/bnll_layer.cu
@@ -1,8 +1,10 @@
// Copyright 2013 Yangqing Jia
+#include <algorithm>
+#include <vector>
+
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
-#include <algorithm>
using std::max;
@@ -57,6 +59,7 @@ void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
@@ -81,6 +84,7 @@ Dtype BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
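
A pattern that repeats in every .cu file of this diff: cpplint misparses CUDA's <<<grid, block>>> launch syntax as shift operators and reports whitespace/operators, so each launch gets a NOLINT_NEXTLINE suppression on the line above it. A minimal sketch of the pattern, using a hypothetical kernel and the CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS / CUDA_POST_KERNEL_CHECK macros seen throughout the diff (assumed here to come from caffe/common.hpp):

    #include "caffe/common.hpp"

    // Hypothetical kernel, shown only to illustrate the suppression pattern.
    template <typename Dtype>
    __global__ void ScaleKernel(const int n, const Dtype alpha, Dtype* data) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) {
        data[i] *= alpha;
      }
    }

    template <typename Dtype>
    void ScaleGPU(const int n, const Dtype alpha, Dtype* data) {
      // NOLINT_NEXTLINE(whitespace/operators)
      ScaleKernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
          n, alpha, data);
      CUDA_POST_KERNEL_CHECK;
    }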
diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp
index 69a860bf..15490393 100644
--- a/src/caffe/layers/conv_layer.cpp
+++ b/src/caffe/layers/conv_layer.cpp
@@ -73,7 +73,7 @@ void ConvolutionLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
bias_multiplier_data[i] = 1.;
}
}
-};
+}
template <typename Dtype>
diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp
index ffb7fd0a..7950313d 100644
--- a/src/caffe/layers/data_layer.cpp
+++ b/src/caffe/layers/data_layer.cpp
@@ -50,12 +50,15 @@ void* DataLayerPrefetch(void* layer_pointer) {
int h_off, w_off;
// We only do random crop when we do training.
if (Caffe::phase() == Caffe::TRAIN) {
+ // NOLINT_NEXTLINE(runtime/threadsafe_fn)
h_off = rand() % (height - cropsize);
+ // NOLINT_NEXTLINE(runtime/threadsafe_fn)
w_off = rand() % (width - cropsize);
} else {
h_off = (height - cropsize) / 2;
w_off = (width - cropsize) / 2;
}
+ // NOLINT_NEXTLINE(runtime/threadsafe_fn)
if (mirror && rand() % 2) {
// Copy mirrored version
for (int c = 0; c < channels; ++c) {
@@ -111,7 +114,7 @@ void* DataLayerPrefetch(void* layer_pointer) {
}
}
- return (void*)NULL;
+ return reinterpret_cast<void*>(NULL);
}
template <typename Dtype>
@@ -140,6 +143,7 @@ void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
iter_->SeekToFirst();
// Check if we would need to randomly skip a few data points
if (this->layer_param_.rand_skip()) {
+ // NOLINT_NEXTLINE(runtime/threadsafe_fn)
unsigned int skip = rand() % this->layer_param_.rand_skip();
LOG(INFO) << "Skipping first " << skip << " data points.";
while (skip-- > 0) {
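
The NOLINT_NEXTLINE(runtime/threadsafe_fn) comments above keep the existing rand() calls and merely silence cpplint, which warns that rand() is not thread-safe and points at rand_r() instead. For reference, the reentrant form cpplint suggests would look roughly like this sketch (not what this commit does):

    #include <cstdlib>

    // rand_r() keeps its state in the caller-supplied seed instead of a
    // hidden global, so concurrent prefetch threads would not share state.
    int RandomOffset(unsigned int* seed, int range) {
      return rand_r(seed) % range;
    }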
diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu
index df94f2de..0e15b6fa 100644
--- a/src/caffe/layers/dropout_layer.cu
+++ b/src/caffe/layers/dropout_layer.cu
@@ -2,6 +2,7 @@
#include <algorithm>
#include <limits>
+#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
@@ -23,14 +24,14 @@ void DropoutLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
DCHECK(threshold_ < 1.);
scale_ = 1. / (1. - threshold_);
uint_thres_ = (unsigned int)(UINT_MAX * threshold_);
-};
+}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
- int* mask = (int*)rand_vec_->mutable_cpu_data();
+ int* mask = reinterpret_cast<int*>(rand_vec_->mutable_cpu_data());
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
// Create random numbers
@@ -52,7 +53,7 @@ Dtype DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
if (propagate_down) {
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- const int* mask = (int*)(rand_vec_->cpu_data());
+ const int* mask = reinterpret_cast<const int*>(rand_vec_->cpu_data());
const int count = (*bottom)[0]->count();
for (int i = 0; i < count; ++i) {
bottom_diff[i] = top_diff[i] * mask[i] * scale_;
@@ -81,9 +82,10 @@ void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
(unsigned int*)(rand_vec_->mutable_gpu_data()), count));
// set thresholds
+ // NOLINT_NEXTLINE(whitespace/operators)
DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
- count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_, scale_,
- top_data);
+ count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_,
+ scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
CUDA_CHECK(cudaMemcpy(top_data, bottom_data,
@@ -111,6 +113,7 @@ Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const unsigned int* mask = (unsigned int*)rand_vec_->gpu_data();
const int count = (*bottom)[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp
index a202f727..bedf2963 100644
--- a/src/caffe/layers/flatten_layer.cpp
+++ b/src/caffe/layers/flatten_layer.cpp
@@ -19,7 +19,7 @@ void FlattenLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
count_ = bottom[0]->num() * channels_out;
CHECK_EQ(count_, bottom[0]->count());
CHECK_EQ(count_, (*top)[0]->count());
-};
+}
template <typename Dtype>
void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp
index 5f9986a2..a94209b3 100644
--- a/src/caffe/layers/im2col_layer.cpp
+++ b/src/caffe/layers/im2col_layer.cpp
@@ -21,8 +21,9 @@ void Im2colLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
HEIGHT_ = bottom[0]->height();
WIDTH_ = bottom[0]->width();
(*top)[0]->Reshape(bottom[0]->num(), CHANNELS_ * KSIZE_ * KSIZE_,
- (HEIGHT_ + 2 * PAD_ - KSIZE_) / STRIDE_ + 1, (WIDTH_ + 2 * PAD_ - KSIZE_) / STRIDE_ + 1);
-};
+ (HEIGHT_ + 2 * PAD_ - KSIZE_) / STRIDE_ + 1,
+ (WIDTH_ + 2 * PAD_ - KSIZE_) / STRIDE_ + 1);
+}
template <typename Dtype>
void Im2colLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
@@ -31,7 +32,7 @@ void Im2colLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
Dtype* top_data = (*top)[0]->mutable_cpu_data();
for (int n = 0; n < bottom[0]->num(); ++n) {
im2col_cpu(bottom_data + bottom[0]->offset(n), CHANNELS_, HEIGHT_,
- WIDTH_, KSIZE_, PAD_, STRIDE_, top_data + (*top)[0]->offset(n));
+ WIDTH_, KSIZE_, PAD_, STRIDE_, top_data + (*top)[0]->offset(n));
}
}
@@ -42,7 +43,7 @@ void Im2colLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
Dtype* top_data = (*top)[0]->mutable_gpu_data();
for (int n = 0; n < bottom[0]->num(); ++n) {
im2col_gpu(bottom_data + bottom[0]->offset(n), CHANNELS_, HEIGHT_,
- WIDTH_, KSIZE_, PAD_, STRIDE_, top_data + (*top)[0]->offset(n));
+ WIDTH_, KSIZE_, PAD_, STRIDE_, top_data + (*top)[0]->offset(n));
}
}
@@ -53,7 +54,7 @@ Dtype Im2colLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
for (int n = 0; n < top[0]->num(); ++n) {
col2im_cpu(top_diff + top[0]->offset(n), CHANNELS_, HEIGHT_,
- WIDTH_, KSIZE_, PAD_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n));
+ WIDTH_, KSIZE_, PAD_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n));
}
return Dtype(0.);
}
@@ -66,7 +67,7 @@ Dtype Im2colLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
for (int n = 0; n < top[0]->num(); ++n) {
col2im_gpu(top_diff + top[0]->offset(n), CHANNELS_, HEIGHT_,
- WIDTH_, KSIZE_, PAD_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n));
+ WIDTH_, KSIZE_, PAD_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n));
}
return Dtype(0.);
}
diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp
index 18f1df0d..d770e23a 100644
--- a/src/caffe/layers/inner_product_layer.cpp
+++ b/src/caffe/layers/inner_product_layer.cpp
@@ -59,7 +59,7 @@ void InnerProductLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
bias_multiplier_data[i] = 1.;
}
}
-};
+}
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
diff --git a/src/caffe/layers/loss_layer.cu b/src/caffe/layers/loss_layer.cu
index ac05ba41..745bfa4b 100644
--- a/src/caffe/layers/loss_layer.cu
+++ b/src/caffe/layers/loss_layer.cu
@@ -1,7 +1,9 @@
// Copyright 2013 Yangqing Jia
+
#include <algorithm>
#include <cmath>
#include <cfloat>
+#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
@@ -24,12 +26,12 @@ void MultinomialLogisticLossLayer<Dtype>::SetUp(
CHECK_EQ(bottom[1]->channels(), 1);
CHECK_EQ(bottom[1]->height(), 1);
CHECK_EQ(bottom[1]->width(), 1);
-};
+}
template <typename Dtype>
-Dtype MultinomialLogisticLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down,
+Dtype MultinomialLogisticLossLayer<Dtype>::Backward_cpu(
+ const vector<Blob<Dtype>*>& top, const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
const Dtype* bottom_data = (*bottom)[0]->cpu_data();
const Dtype* bottom_label = (*bottom)[1]->cpu_data();
@@ -66,7 +68,7 @@ void InfogainLossLayer<Dtype>::SetUp(
CHECK_EQ(infogain_.num(), 1);
CHECK_EQ(infogain_.channels(), 1);
CHECK_EQ(infogain_.height(), infogain_.width());
-};
+}
template <typename Dtype>
@@ -154,10 +156,11 @@ void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
max_id = j;
}
}
- if (max_id == (int)bottom_label[i]) {
+ if (max_id == static_cast<int>(bottom_label[i])) {
++accuracy;
}
- Dtype prob = max(bottom_data[i * dim + (int)bottom_label[i]], kLOG_THRESHOLD);
+ Dtype prob = max(bottom_data[i * dim + static_cast<int>(bottom_label[i])],
+ kLOG_THRESHOLD);
logprob -= log(prob);
}
// LOG(INFO) << "Accuracy: " << accuracy;
diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp
index 337b77b7..36dbe41e 100644
--- a/src/caffe/layers/lrn_layer.cpp
+++ b/src/caffe/layers/lrn_layer.cpp
@@ -25,7 +25,7 @@ void LRNLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
pre_pad_ = (size_ - 1) / 2;
alpha_ = this->layer_param_.alpha();
beta_ = this->layer_param_.beta();
-};
+}
template <typename Dtype>
void LRNLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu
index 2afbf383..9c82b35d 100644
--- a/src/caffe/layers/lrn_layer.cu
+++ b/src/caffe/layers/lrn_layer.cu
@@ -1,5 +1,7 @@
// Copyright 2013 Yangqing Jia
+#include <vector>
+
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
@@ -74,11 +76,13 @@ void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int n_threads = num_ * height_ * width_;
+ // NOLINT_NEXTLINE(whitespace/operators)
LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
n_threads, bottom_data, num_, channels_, height_, width_, size_,
alpha_ / size_, scale_data);
CUDA_POST_KERNEL_CHECK;
n_threads = bottom[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
n_threads, bottom_data, scale_data, -beta_, top_data);
CUDA_POST_KERNEL_CHECK;
@@ -151,6 +155,7 @@ template <typename Dtype>
Dtype LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
int n_threads = num_ * height_ * width_;
+ // NOLINT_NEXTLINE(whitespace/operators)
LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(),
scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp
index dd09dca3..5def7559 100644
--- a/src/caffe/layers/neuron_layer.cpp
+++ b/src/caffe/layers/neuron_layer.cpp
@@ -18,7 +18,7 @@ void NeuronLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
(*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
}
-};
+}
INSTANTIATE_CLASS(NeuronLayer);
diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp
index 61416421..ce30e842 100644
--- a/src/caffe/layers/pooling_layer.cpp
+++ b/src/caffe/layers/pooling_layer.cpp
@@ -34,7 +34,7 @@ void PoolingLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
rand_idx_.Reshape(bottom[0]->num(), CHANNELS_, POOLED_HEIGHT_,
POOLED_WIDTH_);
}
-};
+}
// TODO(Yangqing): Is there a faster way to do pooling in the channel-first
// case?
diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu
index 4fd326cb..75078b3d 100644
--- a/src/caffe/layers/pooling_layer.cu
+++ b/src/caffe/layers/pooling_layer.cu
@@ -2,6 +2,8 @@
#include <algorithm>
#include <cfloat>
+#include <vector>
+
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
@@ -144,12 +146,14 @@ void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
int count = (*top)[0]->count();
switch (this->layer_param_.pool()) {
case LayerParameter_PoolMethod_MAX:
+ // NOLINT_NEXTLINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
top_data);
break;
case LayerParameter_PoolMethod_AVE:
+ // NOLINT_NEXTLINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
@@ -160,12 +164,16 @@ void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
// We need to create the random index as well.
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(),
rand_idx_.mutable_gpu_data(), count));
- StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+ // NOLINT_NEXTLINE(whitespace/operators)
+ StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
+ CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
- StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+ // NOLINT_NEXTLINE(whitespace/operators)
+ StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
+ CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
top_data);
@@ -267,7 +275,7 @@ __global__ void StoPoolBackward(const int nthreads,
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
- (index == int(rand_idx[ph * pooled_width + pw]));
+ (index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
@@ -286,18 +294,21 @@ Dtype PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
int count = (*bottom)[0]->count();
switch (this->layer_param_.pool()) {
case LayerParameter_PoolMethod_MAX:
+ // NOLINT_NEXTLINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
POOLED_WIDTH_, KSIZE_, STRIDE_, bottom_diff);
break;
case LayerParameter_PoolMethod_AVE:
+ // NOLINT_NEXTLINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
bottom_diff);
break;
case LayerParameter_PoolMethod_STOCHASTIC:
+ // NOLINT_NEXTLINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu
index b0fc46ef..c56d22a5 100644
--- a/src/caffe/layers/relu_layer.cu
+++ b/src/caffe/layers/relu_layer.cu
@@ -1,8 +1,10 @@
// Copyright 2013 Yangqing Jia
+#include <algorithm>
+#include <vector>
+
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
-#include <algorithm>
using std::max;
@@ -49,11 +51,13 @@ void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
- // << (unsigned long)bottom_data << " top_data: " << (unsigned long)top_data
+ // << (unsigned long)bottom_data
+ // << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
@@ -76,6 +80,7 @@ Dtype ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu
index f112a529..1680aa6b 100644
--- a/src/caffe/layers/sigmoid_layer.cu
+++ b/src/caffe/layers/sigmoid_layer.cu
@@ -1,9 +1,11 @@
// Copyright 2014 Tobias Domhan
-#include "caffe/layer.hpp"
-#include "caffe/vision_layers.hpp"
#include <algorithm>
#include <cmath>
+#include <vector>
+
+#include "caffe/layer.hpp"
+#include "caffe/vision_layers.hpp"
using std::max;
@@ -63,11 +65,13 @@ void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
- // << (unsigned long)bottom_data << " top_data: " << (unsigned long)top_data
+ // << (unsigned long)bottom_data
+ // << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
@@ -91,6 +95,7 @@ Dtype SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu
index a7659697..f7adab3b 100644
--- a/src/caffe/layers/softmax_layer.cu
+++ b/src/caffe/layers/softmax_layer.cu
@@ -3,7 +3,8 @@
#include <algorithm>
#include <cfloat>
#include <vector>
-#include <thrust/device_vector.h>
+
+#include "thrust/device_vector.h"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
@@ -27,7 +28,7 @@ void SoftmaxLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
multiplier_data[i] = 1.;
}
scale_.Reshape(bottom[0]->num(), 1, 1, 1);
-};
+}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
@@ -104,19 +105,23 @@ void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
// we need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// Compute max
+ // NOLINT_NEXTLINE(whitespace/operators)
kernel_get_max<Dtype><<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS>>>(
num, dim, bottom_data, scale_data);
// subtraction
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_data, sum_multiplier_.gpu_data(), 1., top_data);
// Perform exponentiation
+ // NOLINT_NEXTLINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
num * dim, top_data, top_data);
// sum after exp
caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
sum_multiplier_.gpu_data(), 0., scale_data);
// Do division
- kernel_softmax_div<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
+ // NOLINT_NEXTLINE(whitespace/operators)
+ kernel_softmax_div<Dtype><<<CAFFE_GET_BLOCKS(num * dim),
+ CAFFE_CUDA_NUM_THREADS>>>(
num, dim, scale_data, top_data);
}
diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu
index 9bb2313a..3e265869 100644
--- a/src/caffe/layers/softmax_loss_layer.cu
+++ b/src/caffe/layers/softmax_loss_layer.cu
@@ -21,19 +21,19 @@ void SoftmaxWithLossLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
softmax_bottom_vec_.push_back(bottom[0]);
softmax_top_vec_.push_back(&prob_);
softmax_layer_->SetUp(softmax_bottom_vec_, &softmax_top_vec_);
-};
+}
template <typename Dtype>
-void SoftmaxWithLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
+ const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
// The forward pass computes the softmax prob values.
softmax_bottom_vec_[0] = bottom[0];
softmax_layer_->Forward(softmax_bottom_vec_, &softmax_top_vec_);
}
template <typename Dtype>
-void SoftmaxWithLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
+ const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
// The forward pass computes the softmax prob values.
softmax_bottom_vec_[0] = bottom[0];
softmax_layer_->Forward(softmax_bottom_vec_, &softmax_top_vec_);
diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp
index 5accdd08..56e95610 100644
--- a/src/caffe/layers/split_layer.cpp
+++ b/src/caffe/layers/split_layer.cpp
@@ -25,7 +25,7 @@ void SplitLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
bottom[0]->height(), bottom[0]->width());
CHECK_EQ(count_, (*top)[i]->count());
}
-};
+}
template <typename Dtype>
void SplitLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu
index 22e0831a..28145771 100644
--- a/src/caffe/layers/tanh_layer.cu
+++ b/src/caffe/layers/tanh_layer.cu
@@ -1,9 +1,12 @@
// Copyright 2014 Aravindh Mahendran
-// TanH neuron activation function layer. Adapted from ReLU layer code written by Yangqing Jia
+// TanH neuron activation function layer.
+// Adapted from ReLU layer code written by Yangqing Jia
+
+#include <algorithm>
+#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
-#include <algorithm>
namespace caffe {
@@ -55,11 +58,13 @@ void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
- // << (unsigned long)bottom_data << " top_data: " << (unsigned long)top_data
+ // << (unsigned long)bottom_data
+ // << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
@@ -84,6 +89,7 @@ Dtype TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
+ // NOLINT_NEXTLINE(whitespace/operators)
TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp
index 340bbe1d..eb024856 100644
--- a/src/caffe/solver.cpp
+++ b/src/caffe/solver.cpp
@@ -123,8 +123,9 @@ void Solver<Dtype>::Snapshot() {
// For intermediate results, we will also dump the gradient values.
net_->ToProto(&net_param, param_.snapshot_diff());
string filename(param_.snapshot_prefix());
- char iter_str_buffer[20];
- sprintf(iter_str_buffer, "_iter_%d", iter_);
+ const int kBufferSize = 20;
+ char iter_str_buffer[kBufferSize];
+ snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_);
filename += iter_str_buffer;
LOG(INFO) << "Snapshotting to " << filename;
WriteProtoToBinaryFile(net_param, filename.c_str());
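
Solver::Snapshot() switches from sprintf to snprintf, satisfying cpplint's runtime/printf check by bounding the write to the buffer size. A reduced sketch of the same pattern:

    #include <cstdio>
    #include <string>

    // snprintf writes at most kBufferSize bytes, including the trailing '\0',
    // so the iteration suffix can never overrun the stack buffer.
    void AppendIterSuffix(std::string* filename, int iter) {
      const int kBufferSize = 20;
      char buffer[kBufferSize];
      snprintf(buffer, kBufferSize, "_iter_%d", iter);
      *filename += buffer;
    }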
diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp
index 7c3084e8..7ce1a384 100644
--- a/src/caffe/test/test_blob.cpp
+++ b/src/caffe/test/test_blob.cpp
@@ -1,8 +1,8 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/common.hpp"
#include "caffe/blob.hpp"
@@ -17,7 +17,7 @@ class BlobSimpleTest : public ::testing::Test {
protected:
BlobSimpleTest()
: blob_(new Blob<Dtype>()),
- blob_preshaped_(new Blob<Dtype>(2, 3, 4, 5)) {};
+ blob_preshaped_(new Blob<Dtype>(2, 3, 4, 5)) {}
virtual ~BlobSimpleTest() { delete blob_; delete blob_preshaped_; }
Blob<Dtype>* const blob_;
Blob<Dtype>* const blob_preshaped_;
@@ -57,4 +57,4 @@ TYPED_TEST(BlobSimpleTest, TestReshape) {
EXPECT_EQ(this->blob_->count(), 120);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_caffe_main.hpp b/src/caffe/test/test_caffe_main.hpp
index a8c16573..01cb0c81 100644
--- a/src/caffe/test/test_caffe_main.hpp
+++ b/src/caffe/test/test_caffe_main.hpp
@@ -11,8 +11,9 @@
#include <cstdlib>
#include <cstdio>
-#include <iostream>
+using std::cout;
+using std::endl;
namespace caffe {
@@ -20,8 +21,7 @@ cudaDeviceProp CAFFE_TEST_CUDA_PROP;
} // namespace caffe
-using namespace caffe;
-using namespace std;
+using caffe::CAFFE_TEST_CUDA_PROP;
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
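
test_caffe_main.hpp also drops its "using namespace caffe" and "using namespace std" directives; cpplint's build/namespaces check rejects using-directives, particularly in headers, in favor of naming the few symbols actually needed, as the replacement lines above do:

    // Only the symbols the test main actually uses are pulled in.
    using caffe::CAFFE_TEST_CUDA_PROP;
    using std::cout;
    using std::endl;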
diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp
index 3afd6d09..8b521cc7 100644
--- a/src/caffe/test/test_common.cpp
+++ b/src/caffe/test/test_common.cpp
@@ -1,8 +1,8 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
@@ -40,10 +40,10 @@ TEST_F(CommonTest, TestRandSeedCPU) {
SyncedMemory data_b(10 * sizeof(int));
Caffe::set_random_seed(1701);
viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, Caffe::vsl_stream(),
- 10, (int*)data_a.mutable_cpu_data(), 0.5);
+ 10, reinterpret_cast<int*>(data_a.mutable_cpu_data()), 0.5);
Caffe::set_random_seed(1701);
viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, Caffe::vsl_stream(),
- 10, (int*)data_b.mutable_cpu_data(), 0.5);
+ 10, reinterpret_cast<int*>(data_b.mutable_cpu_data()), 0.5);
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(((const int*)(data_a.cpu_data()))[i],
((const int*)(data_b.cpu_data()))[i]);
@@ -56,10 +56,10 @@ TEST_F(CommonTest, TestRandSeedGPU) {
SyncedMemory data_b(10 * sizeof(unsigned int));
Caffe::set_random_seed(1701);
CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
- (unsigned int*)data_a.mutable_gpu_data(), 10));
+ reinterpret_cast<unsigned int*>(data_a.mutable_gpu_data()), 10));
Caffe::set_random_seed(1701);
CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
- (unsigned int*)data_b.mutable_gpu_data(), 10));
+ reinterpret_cast<unsigned int*>(data_b.mutable_gpu_data()), 10));
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i],
((const unsigned int*)(data_b.cpu_data()))[i]);
diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp
index ebd3cf45..9f47e6d4 100644
--- a/src/caffe/test/test_convolution_layer.cpp
+++ b/src/caffe/test/test_convolution_layer.cpp
@@ -1,8 +1,9 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -21,7 +22,7 @@ class ConvolutionLayerTest : public ::testing::Test {
protected:
ConvolutionLayerTest()
: blob_bottom_(new Blob<Dtype>()),
- blob_top_(new Blob<Dtype>()) {};
+ blob_top_(new Blob<Dtype>()) {}
virtual void SetUp() {
blob_bottom_->Reshape(2, 3, 6, 5);
// fill the values
@@ -31,7 +32,7 @@ class ConvolutionLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~ConvolutionLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
@@ -174,7 +175,8 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUGradient) {
Caffe::set_mode(Caffe::CPU);
ConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) {
@@ -188,7 +190,8 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) {
Caffe::set_mode(Caffe::CPU);
ConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
@@ -201,7 +204,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
Caffe::set_mode(Caffe::GPU);
ConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) {
@@ -215,7 +219,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) {
Caffe::set_mode(Caffe::GPU);
ConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp
index 719c50eb..35c34395 100644
--- a/src/caffe/test/test_data_layer.cpp
+++ b/src/caffe/test/test_data_layer.cpp
@@ -1,10 +1,10 @@
// Copyright 2013 Yangqing Jia
-#include <cuda_runtime.h>
-#include <leveldb/db.h>
-
#include <string>
+#include <vector>
+#include "cuda_runtime.h"
+#include "leveldb/db.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -14,6 +14,7 @@
#include "caffe/test/test_caffe_main.hpp"
using std::string;
+using std::stringstream;
namespace caffe {
@@ -25,12 +26,12 @@ class DataLayerTest : public ::testing::Test {
DataLayerTest()
: blob_top_data_(new Blob<Dtype>()),
blob_top_label_(new Blob<Dtype>()),
- filename(NULL) {};
+ filename(NULL) {}
virtual void SetUp() {
blob_top_vec_.push_back(blob_top_data_);
blob_top_vec_.push_back(blob_top_label_);
// Create the leveldb
- filename = tmpnam(NULL); // get temp name
+ filename = tmpnam(NULL); // get temp name
LOG(INFO) << "Using temporary leveldb " << filename;
leveldb::DB* db;
leveldb::Options options;
@@ -53,7 +54,7 @@ class DataLayerTest : public ::testing::Test {
db->Put(leveldb::WriteOptions(), ss.str(), datum.SerializeAsString());
}
delete db;
- };
+ }
virtual ~DataLayerTest() { delete blob_top_data_; delete blob_top_label_; }
@@ -112,4 +113,4 @@ TYPED_TEST(DataLayerTest, TestRead) {
}
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp
index 82ea682f..121929f8 100644
--- a/src/caffe/test/test_euclidean_loss_layer.cpp
+++ b/src/caffe/test/test_euclidean_loss_layer.cpp
@@ -3,8 +3,9 @@
#include <cmath>
#include <cstdlib>
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -55,4 +56,4 @@ TYPED_TEST(EuclideanLossLayerTest, TestGradientCPU) {
this->blob_top_vec_, 0, -1, -1);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp
index 7738ce45..c4388c27 100644
--- a/src/caffe/test/test_filler.cpp
+++ b/src/caffe/test/test_filler.cpp
@@ -1,8 +1,8 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/filler.hpp"
@@ -21,7 +21,7 @@ class ConstantFillerTest : public ::testing::Test {
filler_param_.set_value(10.);
filler_.reset(new ConstantFiller<Dtype>(filler_param_));
filler_->Fill(blob_);
- };
+ }
virtual ~ConstantFillerTest() { delete blob_; }
Blob<Dtype>* const blob_;
FillerParameter filler_param_;
@@ -50,7 +50,7 @@ class UniformFillerTest : public ::testing::Test {
filler_param_.set_max(2.);
filler_.reset(new UniformFiller<Dtype>(filler_param_));
filler_->Fill(blob_);
- };
+ }
virtual ~UniformFillerTest() { delete blob_; }
Blob<Dtype>* const blob_;
FillerParameter filler_param_;
@@ -77,7 +77,7 @@ class PositiveUnitballFillerTest : public ::testing::Test {
filler_param_() {
filler_.reset(new PositiveUnitballFiller<Dtype>(filler_param_));
filler_->Fill(blob_);
- };
+ }
virtual ~PositiveUnitballFillerTest() { delete blob_; }
Blob<Dtype>* const blob_;
FillerParameter filler_param_;
@@ -116,7 +116,7 @@ class GaussianFillerTest : public ::testing::Test {
filler_param_.set_std(0.1);
filler_.reset(new GaussianFiller<Dtype>(filler_param_));
filler_->Fill(blob_);
- };
+ }
virtual ~GaussianFillerTest() { delete blob_; }
Blob<Dtype>* const blob_;
FillerParameter filler_param_;
@@ -146,4 +146,4 @@ TYPED_TEST(GaussianFillerTest, TestFill) {
EXPECT_LE(var, target_var * 5.);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp
index 805fd72e..03dff36d 100644
--- a/src/caffe/test/test_flatten_layer.cpp
+++ b/src/caffe/test/test_flatten_layer.cpp
@@ -1,8 +1,9 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -28,7 +29,7 @@ class FlattenLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~FlattenLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
Blob<Dtype>* const blob_top_;
@@ -80,7 +81,8 @@ TYPED_TEST(FlattenLayerTest, TestCPUGradient) {
Caffe::set_mode(Caffe::CPU);
FlattenLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(FlattenLayerTest, TestGPUGradient) {
@@ -88,8 +90,9 @@ TYPED_TEST(FlattenLayerTest, TestGPUGradient) {
Caffe::set_mode(Caffe::GPU);
FlattenLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp
index dc6445d6..842d3a74 100644
--- a/src/caffe/test/test_im2col_layer.cpp
+++ b/src/caffe/test/test_im2col_layer.cpp
@@ -1,8 +1,9 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -28,7 +29,7 @@ class Im2colLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~Im2colLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
Blob<Dtype>* const blob_top_;
@@ -88,7 +89,8 @@ TYPED_TEST(Im2colLayerTest, TestCPUGradient) {
Caffe::set_mode(Caffe::CPU);
Im2colLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(Im2colLayerTest, TestGPUGradient) {
@@ -98,8 +100,9 @@ TYPED_TEST(Im2colLayerTest, TestGPUGradient) {
Caffe::set_mode(Caffe::GPU);
Im2colLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_innerproduct_layer.cpp b/src/caffe/test/test_innerproduct_layer.cpp
index 0e2b612f..acb4c767 100644
--- a/src/caffe/test/test_innerproduct_layer.cpp
+++ b/src/caffe/test/test_innerproduct_layer.cpp
@@ -1,8 +1,9 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -28,7 +29,7 @@ class InnerProductLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~InnerProductLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
Blob<Dtype>* const blob_top_;
@@ -43,7 +44,7 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) {
LayerParameter layer_param;
layer_param.set_num_output(10);
shared_ptr<InnerProductLayer<TypeParam> > layer(
- new InnerProductLayer<TypeParam>(layer_param));
+ new InnerProductLayer<TypeParam>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->height(), 1);
@@ -60,37 +61,37 @@ TYPED_TEST(InnerProductLayerTest, TestCPU) {
layer_param.mutable_bias_filler()->set_min(1);
layer_param.mutable_bias_filler()->set_max(2);
shared_ptr<InnerProductLayer<TypeParam> > layer(
- new InnerProductLayer<TypeParam>(layer_param));
+ new InnerProductLayer<TypeParam>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
const TypeParam* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
for (int i = 0; i < count; ++i) {
- EXPECT_GE(data[i], 1.);
+ EXPECT_GE(data[i], 1.);
}
}
TYPED_TEST(InnerProductLayerTest, TestGPU) {
- if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- layer_param.set_num_output(10);
- layer_param.mutable_weight_filler()->set_type("uniform");
- layer_param.mutable_bias_filler()->set_type("uniform");
- layer_param.mutable_bias_filler()->set_min(1);
- layer_param.mutable_bias_filler()->set_max(2);
- shared_ptr<InnerProductLayer<TypeParam> > layer(
- new InnerProductLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
- const int count = this->blob_top_->count();
- for (int i = 0; i < count; ++i) {
- EXPECT_GE(data[i], 1.);
- }
- } else {
- LOG(ERROR) << "Skipping test due to old architecture.";
- }
+ if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
+ LayerParameter layer_param;
+ Caffe::set_mode(Caffe::GPU);
+ layer_param.set_num_output(10);
+ layer_param.mutable_weight_filler()->set_type("uniform");
+ layer_param.mutable_bias_filler()->set_type("uniform");
+ layer_param.mutable_bias_filler()->set_min(1);
+ layer_param.mutable_bias_filler()->set_max(2);
+ shared_ptr<InnerProductLayer<TypeParam> > layer(
+ new InnerProductLayer<TypeParam>(layer_param));
+ layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ const TypeParam* data = this->blob_top_->cpu_data();
+ const int count = this->blob_top_->count();
+ for (int i = 0; i < count; ++i) {
+ EXPECT_GE(data[i], 1.);
+ }
+ } else {
+ LOG(ERROR) << "Skipping test due to old architecture.";
+ }
}
TYPED_TEST(InnerProductLayerTest, TestCPUGradient) {
@@ -103,7 +104,8 @@ TYPED_TEST(InnerProductLayerTest, TestCPUGradient) {
layer_param.mutable_bias_filler()->set_max(2);
InnerProductLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(InnerProductLayerTest, TestGPUGradient) {
@@ -121,4 +123,4 @@ TYPED_TEST(InnerProductLayerTest, TestGPUGradient) {
}
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp
index 757bac33..6c778df9 100644
--- a/src/caffe/test/test_lrn_layer.cpp
+++ b/src/caffe/test/test_lrn_layer.cpp
@@ -2,9 +2,9 @@
#include <algorithm>
#include <cstring>
-#include <cuda_runtime.h>
-#include <iostream>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -26,7 +26,7 @@ class LRNLayerTest : public ::testing::Test {
protected:
LRNLayerTest()
: blob_bottom_(new Blob<Dtype>()),
- blob_top_(new Blob<Dtype>()) {};
+ blob_top_(new Blob<Dtype>()) {}
virtual void SetUp() {
Caffe::set_random_seed(1701);
blob_bottom_->Reshape(2, 7, 3, 3);
@@ -36,7 +36,7 @@ class LRNLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~LRNLayerTest() { delete blob_bottom_; delete blob_top_; }
void ReferenceLRNForward(const Blob<Dtype>& blob_bottom,
const LayerParameter& layer_param, Blob<Dtype>* blob_top);
@@ -135,10 +135,12 @@ TYPED_TEST(LRNLayerTest, TestCPUGradient) {
this->blob_top_->mutable_cpu_diff()[i] = 1.;
}
layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
- //for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] << std::endl;
- //}
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ // for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i]
+ // << std::endl;
+ // }
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(LRNLayerTest, TestGPUGradient) {
@@ -152,10 +154,12 @@ TYPED_TEST(LRNLayerTest, TestGPUGradient) {
this->blob_top_->mutable_cpu_diff()[i] = 1.;
}
layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
- //for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- // std::cout << "GPU diff " << this->blob_bottom_->cpu_diff()[i] << std::endl;
- //}
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ // for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ // std::cout << "GPU diff " << this->blob_bottom_->cpu_diff()[i]
+ // << std::endl;
+ // }
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
index 5595c84f..835d1b20 100644
--- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
+++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
@@ -3,8 +3,9 @@
#include <cmath>
#include <cstdlib>
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -30,6 +31,7 @@ class MultinomialLogisticLossLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_data_);
blob_bottom_vec_.push_back(blob_bottom_data_);
for (int i = 0; i < blob_bottom_label_->count(); ++i) {
+ // NOLINT_NEXTLINE(runtime/threadsafe_fn)
blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5;
}
blob_bottom_vec_.push_back(blob_bottom_label_);
@@ -58,4 +60,4 @@ TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
this->blob_top_vec_, 0, -1, -1);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp
index 8674519f..9a7ff5c8 100644
--- a/src/caffe/test/test_neuron_layer.cpp
+++ b/src/caffe/test/test_neuron_layer.cpp
@@ -1,8 +1,9 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -28,7 +29,7 @@ class NeuronLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~NeuronLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
Blob<Dtype>* const blob_top_;
@@ -60,7 +61,8 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientCPU) {
Caffe::set_mode(Caffe::CPU);
ReLULayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
@@ -85,7 +87,8 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientGPU) {
Caffe::set_mode(Caffe::GPU);
ReLULayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
@@ -100,7 +103,7 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidCPU) {
const TypeParam* top_data = this->blob_top_->cpu_data();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i])));
- //check that we squashed the value between 0 and 1
+ // check that we squashed the value between 0 and 1
EXPECT_GE(top_data[i], 0.);
EXPECT_LE(top_data[i], 1.);
}
@@ -112,7 +115,8 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradientCPU) {
Caffe::set_mode(Caffe::CPU);
SigmoidLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) {
@@ -126,7 +130,7 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) {
const TypeParam* top_data = this->blob_top_->cpu_data();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i])));
- //check that we squashed the value between 0 and 1
+ // check that we squashed the value between 0 and 1
EXPECT_GE(top_data[i], 0.);
EXPECT_LE(top_data[i], 1.);
}
@@ -138,7 +142,8 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradientGPU) {
Caffe::set_mode(Caffe::GPU);
SigmoidLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
@@ -167,7 +172,8 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPU) {
Caffe::set_mode(Caffe::CPU);
DropoutLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
@@ -264,7 +270,8 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradientCPU) {
Caffe::set_mode(Caffe::CPU);
BNLLLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
@@ -289,10 +296,9 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradientGPU) {
Caffe::set_mode(Caffe::GPU);
BNLLLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
-
-
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_platform.cpp b/src/caffe/test/test_platform.cpp
index ea3cee2f..bd2dcd33 100644
--- a/src/caffe/test/test_platform.cpp
+++ b/src/caffe/test/test_platform.cpp
@@ -2,11 +2,10 @@
#include <cstdlib>
#include <cstdio>
-#include <iostream>
-#include <cuda_runtime.h>
-#include <glog/logging.h>
-#include <gtest/gtest.h>
+#include "cuda_runtime.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
#include "caffe/test/test_caffe_main.hpp"
namespace caffe {
@@ -19,22 +18,35 @@ TEST_F(PlatformTest, TestInitialization) {
printf("Major revision number: %d\n", CAFFE_TEST_CUDA_PROP.major);
printf("Minor revision number: %d\n", CAFFE_TEST_CUDA_PROP.minor);
printf("Name: %s\n", CAFFE_TEST_CUDA_PROP.name);
- printf("Total global memory: %lu\n", CAFFE_TEST_CUDA_PROP.totalGlobalMem);
- printf("Total shared memory per block: %lu\n", CAFFE_TEST_CUDA_PROP.sharedMemPerBlock);
- printf("Total registers per block: %d\n", CAFFE_TEST_CUDA_PROP.regsPerBlock);
- printf("Warp size: %d\n", CAFFE_TEST_CUDA_PROP.warpSize);
- printf("Maximum memory pitch: %lu\n", CAFFE_TEST_CUDA_PROP.memPitch);
- printf("Maximum threads per block: %d\n", CAFFE_TEST_CUDA_PROP.maxThreadsPerBlock);
+ printf("Total global memory: %lu\n",
+ CAFFE_TEST_CUDA_PROP.totalGlobalMem);
+ printf("Total shared memory per block: %lu\n",
+ CAFFE_TEST_CUDA_PROP.sharedMemPerBlock);
+ printf("Total registers per block: %d\n",
+ CAFFE_TEST_CUDA_PROP.regsPerBlock);
+ printf("Warp size: %d\n",
+ CAFFE_TEST_CUDA_PROP.warpSize);
+ printf("Maximum memory pitch: %lu\n",
+ CAFFE_TEST_CUDA_PROP.memPitch);
+ printf("Maximum threads per block: %d\n",
+ CAFFE_TEST_CUDA_PROP.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
- printf("Maximum dimension %d of block: %d\n", i, CAFFE_TEST_CUDA_PROP.maxThreadsDim[i]);
+ printf("Maximum dimension %d of block: %d\n", i,
+ CAFFE_TEST_CUDA_PROP.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
- printf("Maximum dimension %d of grid: %d\n", i, CAFFE_TEST_CUDA_PROP.maxGridSize[i]);
- printf("Clock rate: %d\n", CAFFE_TEST_CUDA_PROP.clockRate);
- printf("Total constant memory: %lu\n", CAFFE_TEST_CUDA_PROP.totalConstMem);
- printf("Texture alignment: %lu\n", CAFFE_TEST_CUDA_PROP.textureAlignment);
- printf("Concurrent copy and execution: %s\n", (CAFFE_TEST_CUDA_PROP.deviceOverlap ? "Yes" : "No"));
- printf("Number of multiprocessors: %d\n", CAFFE_TEST_CUDA_PROP.multiProcessorCount);
- printf("Kernel execution timeout: %s\n", (CAFFE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? "Yes" : "No"));
+ printf("Maximum dimension %d of grid: %d\n", i,
+ CAFFE_TEST_CUDA_PROP.maxGridSize[i]);
+ printf("Clock rate: %d\n", CAFFE_TEST_CUDA_PROP.clockRate);
+ printf("Total constant memory: %lu\n",
+ CAFFE_TEST_CUDA_PROP.totalConstMem);
+ printf("Texture alignment: %lu\n",
+ CAFFE_TEST_CUDA_PROP.textureAlignment);
+ printf("Concurrent copy and execution: %s\n",
+ (CAFFE_TEST_CUDA_PROP.deviceOverlap ? "Yes" : "No"));
+ printf("Number of multiprocessors: %d\n",
+ CAFFE_TEST_CUDA_PROP.multiProcessorCount);
+ printf("Kernel execution timeout: %s\n",
+ (CAFFE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? "Yes" : "No"));
EXPECT_TRUE(true);
}
diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp
index 67cae131..c08a7c08 100644
--- a/src/caffe/test/test_pooling_layer.cpp
+++ b/src/caffe/test/test_pooling_layer.cpp
@@ -1,8 +1,9 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -21,7 +22,7 @@ class PoolingLayerTest : public ::testing::Test {
protected:
PoolingLayerTest()
: blob_bottom_(new Blob<Dtype>()),
- blob_top_(new Blob<Dtype>()) {};
+ blob_top_(new Blob<Dtype>()) {}
virtual void SetUp() {
Caffe::set_random_seed(1701);
blob_bottom_->Reshape(2, 3, 6, 5);
@@ -31,7 +32,7 @@ class PoolingLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~PoolingLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
Blob<Dtype>* const blob_top_;
@@ -89,7 +90,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) {
Caffe::set_mode(Caffe::CPU);
PoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
@@ -100,7 +102,8 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
Caffe::set_mode(Caffe::GPU);
PoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
@@ -112,7 +115,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientAve) {
Caffe::set_mode(Caffe::CPU);
PoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
@@ -124,8 +128,9 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientAve) {
Caffe::set_mode(Caffe::GPU);
PoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_protobuf.cpp b/src/caffe/test/test_protobuf.cpp
index 11cdcf69..d8d511dd 100644
--- a/src/caffe/test/test_protobuf.cpp
+++ b/src/caffe/test/test_protobuf.cpp
@@ -4,7 +4,7 @@
// format. Nothing special here and no actual code is being tested.
#include <string>
-#include <google/protobuf/text_format.h>
+#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"
#include "caffe/test/test_caffe_main.hpp"
#include "caffe/proto/caffe.pb.h"
@@ -26,4 +26,4 @@ TEST_F(ProtoTest, TestSerialization) {
EXPECT_TRUE(true);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp
index fc1c1b70..d27f40cf 100644
--- a/src/caffe/test/test_softmax_layer.cpp
+++ b/src/caffe/test/test_softmax_layer.cpp
@@ -2,8 +2,9 @@
#include <cmath>
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -29,7 +30,7 @@ class SoftmaxLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~SoftmaxLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
Blob<Dtype>* const blob_top_;
@@ -77,7 +78,8 @@ TYPED_TEST(SoftmaxLayerTest, TestGradientCPU) {
Caffe::set_mode(Caffe::CPU);
SoftmaxLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp
index 328f64b9..bf96d99c 100644
--- a/src/caffe/test/test_softmax_with_loss_layer.cpp
+++ b/src/caffe/test/test_softmax_with_loss_layer.cpp
@@ -3,8 +3,9 @@
#include <cmath>
#include <cstdlib>
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -31,6 +32,7 @@ class SoftmaxWithLossLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_data_);
blob_bottom_vec_.push_back(blob_bottom_data_);
for (int i = 0; i < blob_bottom_label_->count(); ++i) {
+ // NOLINT_NEXTLINE(runtime/threadsafe_fn)
blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5;
}
blob_bottom_vec_.push_back(blob_bottom_label_);
@@ -69,4 +71,4 @@ TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientGPU) {
this->blob_top_vec_, 0, -1, -1);
}
-}
+} // namespace caffe
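
The NOLINT_NEXTLINE annotation introduced in this hunk suppresses a single cpplint category on the following line only. A minimal sketch of the idiom; the rand() call and the runtime/threadsafe_fn category mirror the hunk above, while the helper function is purely illustrative:

    #include <cstdlib>

    int RandomLabel() {
      // cpplint flags rand() as not thread-safe (runtime/threadsafe_fn);
      // the annotation silences exactly that check on the next line only.
      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
      return rand() % 5;
    }
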
diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp
index 3311c9ac..8c1780c3 100644
--- a/src/caffe/test/test_split_layer.cpp
+++ b/src/caffe/test/test_split_layer.cpp
@@ -1,9 +1,11 @@
// Copyright 2014 Jeff Donahue
#include <cstring>
-#include <cuda_runtime.h>
-#include <google/protobuf/text_format.h>
+#include <string>
+#include <vector>
+#include "cuda_runtime.h"
+#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -32,7 +34,7 @@ class SplitLayerTest : public ::testing::Test {
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_a_);
blob_top_vec_.push_back(blob_top_b_);
- };
+ }
virtual ~SplitLayerTest() {
delete blob_bottom_;
delete blob_top_a_;
@@ -156,7 +158,6 @@ TYPED_TEST(SplitLayerTest, TestGPUGradientInPlace) {
template <typename Dtype>
class SplitLayerInsertionTest : public ::testing::Test {
protected:
- SplitLayerInsertionTest() { };
void RunInsertionTest(
const string& input_param_string, const string& output_param_string) {
// Test that insert_splits called on the proto specified by
@@ -1125,4 +1126,4 @@ TYPED_TEST(SplitLayerInsertionTest, TestWithInPlace) {
this->RunInsertionTest(input_proto, expected_output_proto);
}
-}
+} // namespace caffe
diff --git a/src/caffe/test/test_stochastic_pooing.cpp b/src/caffe/test/test_stochastic_pooling.cpp
index e2b60eee..7829b943 100644
--- a/src/caffe/test/test_stochastic_pooing.cpp
+++ b/src/caffe/test/test_stochastic_pooling.cpp
@@ -1,8 +1,10 @@
// Copyright 2013 Yangqing Jia
+#include <algorithm>
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -12,6 +14,8 @@
#include "caffe/test/test_caffe_main.hpp"
+using std::min;
+
namespace caffe {
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
@@ -21,7 +25,7 @@ class StochasticPoolingLayerTest : public ::testing::Test {
protected:
StochasticPoolingLayerTest()
: blob_bottom_(new Blob<Dtype>()),
- blob_top_(new Blob<Dtype>()) {};
+ blob_top_(new Blob<Dtype>()) {}
virtual void SetUp() {
Caffe::set_random_seed(1701);
blob_bottom_->Reshape(2, 3, 6, 5);
@@ -33,7 +37,7 @@ class StochasticPoolingLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~StochasticPoolingLayerTest() {
delete blob_bottom_; delete blob_top_;
@@ -89,7 +93,8 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) {
bool has_equal = false;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
- has_equal |= (pooled == bottom_data[this->blob_bottom_->offset(n, c, h, w)]);
+ has_equal |= (pooled == bottom_data[this->blob_bottom_->
+ offset(n, c, h, w)]);
}
}
EXPECT_TRUE(has_equal);
@@ -130,7 +135,8 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) {
bool smaller_than_max = false;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
- smaller_than_max |= (pooled <= bottom_data[this->blob_bottom_->offset(n, c, h, w)]);
+ smaller_than_max |= (pooled <= bottom_data[this->blob_bottom_->
+ offset(n, c, h, w)]);
}
}
EXPECT_TRUE(smaller_than_max);
@@ -159,4 +165,4 @@ TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) {
-}
+} // namespace caffe
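
The re-wrapped assertions in this file implement a simple membership test: every stochastically pooled output must equal one of the inputs in its pooling window. A minimal sketch of that check on a plain array, assuming a 1-D window for brevity:

    #include <cassert>

    // True if 'pooled' matches at least one value in window[start, end).
    bool PooledValueInWindow(const float* window, int start, int end,
                             float pooled) {
      bool has_equal = false;
      for (int i = start; i < end; ++i) {
        has_equal |= (pooled == window[i]);
      }
      return has_equal;
    }

    int main() {
      const float window[5] = {0.1f, 0.4f, 0.2f, 0.9f, 0.3f};
      assert(PooledValueInWindow(window, 1, 4, 0.9f));   // drawn from window
      assert(!PooledValueInWindow(window, 1, 4, 0.5f));  // not in window
      return 0;
    }
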
diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp
index b8347107..161ca458 100644
--- a/src/caffe/test/test_syncedmem.cpp
+++ b/src/caffe/test/test_syncedmem.cpp
@@ -1,8 +1,9 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
@@ -36,29 +37,31 @@ TEST_F(SyncedMemoryTest, TestCPUWrite) {
EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU);
memset(cpu_data, 1, mem.size());
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ(((char*)cpu_data)[i], 1);
+ EXPECT_EQ((reinterpret_cast<char*>(cpu_data))[i], 1);
}
const void* gpu_data = mem.gpu_data();
EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
// check if values are the same
char* recovered_value = new char[10];
- cudaMemcpy((void*)recovered_value, gpu_data, 10, cudaMemcpyDeviceToHost);
+ cudaMemcpy(reinterpret_cast<void*>(recovered_value), gpu_data, 10,
+ cudaMemcpyDeviceToHost);
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ(((char*)recovered_value)[i], 1);
+ EXPECT_EQ((reinterpret_cast<char*>(recovered_value))[i], 1);
}
// do another round
cpu_data = mem.mutable_cpu_data();
EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU);
memset(cpu_data, 2, mem.size());
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ(((char*)cpu_data)[i], 2);
+ EXPECT_EQ((reinterpret_cast<char*>(cpu_data))[i], 2);
}
gpu_data = mem.gpu_data();
EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
// check if values are the same
- cudaMemcpy((void*)recovered_value, gpu_data, 10, cudaMemcpyDeviceToHost);
+ cudaMemcpy(reinterpret_cast<void*>(recovered_value), gpu_data, 10,
+ cudaMemcpyDeviceToHost);
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ(((char*)recovered_value)[i], 2);
+ EXPECT_EQ((reinterpret_cast<char*>(recovered_value))[i], 2);
}
delete[] recovered_value;
}
@@ -70,7 +73,7 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) {
CUDA_CHECK(cudaMemset(gpu_data, 1, mem.size()));
const void* cpu_data = mem.cpu_data();
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ(((char*)cpu_data)[i], 1);
+ EXPECT_EQ((reinterpret_cast<const char*>(cpu_data))[i], 1);
}
EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
@@ -79,9 +82,9 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) {
CUDA_CHECK(cudaMemset(gpu_data, 2, mem.size()));
cpu_data = mem.cpu_data();
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ(((char*)cpu_data)[i], 2);
+ EXPECT_EQ((reinterpret_cast<const char*>(cpu_data))[i], 2);
}
EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
}
-}
+} // namespace caffe
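
The cast changes in this file replace C-style casts, which cpplint rejects under readability/casting, with explicit reinterpret_cast. A minimal sketch of the before/after on a raw byte buffer; the buffer walk is illustrative and stands in for the EXPECT_EQ loops in the test:

    #include <cstring>

    void InspectBytes(const void* data, size_t n) {
      // Old style, rejected by cpplint:  char c = ((const char*)data)[0];
      // New style, explicit about the reinterpretation:
      const char* bytes = reinterpret_cast<const char*>(data);
      for (size_t i = 0; i < n; ++i) {
        char value = bytes[i];
        (void)value;  // placeholder for the per-byte assertions
      }
    }
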
diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp
index a4226a28..9c80ac24 100644
--- a/src/caffe/test/test_tanh_layer.cpp
+++ b/src/caffe/test/test_tanh_layer.cpp
@@ -1,10 +1,11 @@
// Copyright 2014 Aravindh Mahendran
-// Adapted from other test files
+// Adapted from other test files
#include <cmath>
#include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
@@ -30,7 +31,7 @@ class TanHLayerTest : public ::testing::Test {
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
- };
+ }
virtual ~TanHLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
Blob<Dtype>* const blob_top_;
@@ -52,10 +53,12 @@ TYPED_TEST(TanHLayerTest, TestForwardCPU) {
for (int j = 0; j < this->blob_bottom_->channels(); ++j) {
for (int k = 0; k < this->blob_bottom_->height(); ++k) {
for (int l = 0; l < this->blob_bottom_->width(); ++l) {
- EXPECT_GE(this->blob_top_->data_at(i,j,k,l) + 1e-4,
- (exp(2*this->blob_bottom_->data_at(i,j,k,l))-1)/(exp(2*this->blob_bottom_->data_at(i,j,k,l))+1));
- EXPECT_LE(this->blob_top_->data_at(i,j,k,l) - 1e-4,
- (exp(2*this->blob_bottom_->data_at(i,j,k,l))-1)/(exp(2*this->blob_bottom_->data_at(i,j,k,l))+1));
+ EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4,
+ (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
+ (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
+ EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4,
+ (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
+ (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
}
}
}
@@ -67,7 +70,8 @@ TYPED_TEST(TanHLayerTest, TestGradientCPU) {
Caffe::set_mode(Caffe::CPU);
TanHLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(TanHLayerTest, TestForwardGPU) {
@@ -81,10 +85,12 @@ TYPED_TEST(TanHLayerTest, TestForwardGPU) {
for (int j = 0; j < this->blob_bottom_->channels(); ++j) {
for (int k = 0; k < this->blob_bottom_->height(); ++k) {
for (int l = 0; l < this->blob_bottom_->width(); ++l) {
- EXPECT_GE(this->blob_top_->data_at(i,j,k,l) + 1e-4,
- (exp(2*this->blob_bottom_->data_at(i,j,k,l))-1)/(exp(2*this->blob_bottom_->data_at(i,j,k,l))+1));
- EXPECT_LE(this->blob_top_->data_at(i,j,k,l) - 1e-4,
- (exp(2*this->blob_bottom_->data_at(i,j,k,l))-1)/(exp(2*this->blob_bottom_->data_at(i,j,k,l))+1));
+ EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4,
+ (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
+ (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
+ EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4,
+ (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
+ (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
}
}
}
@@ -96,7 +102,8 @@ TYPED_TEST(TanHLayerTest, TestGradientGPU) {
Caffe::set_mode(Caffe::GPU);
TanHLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
-}
+} // namespace caffe
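
The reference expression used in both forward tests is tanh written via exponentials: tanh(x) = (exp(2x) - 1) / (exp(2x) + 1). A minimal sketch verifying that identity against std::tanh within the same 1e-4 slack the tests allow:

    #include <cassert>
    #include <cmath>

    int main() {
      for (double x = -3.0; x <= 3.0; x += 0.25) {
        double via_exp = (std::exp(2 * x) - 1) / (std::exp(2 * x) + 1);
        assert(std::fabs(via_exp - std::tanh(x)) < 1e-4);
      }
      return 0;
    }
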
diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp
index 3fed148c..3f3ff8b3 100644
--- a/src/caffe/test/test_util_blas.cpp
+++ b/src/caffe/test/test_util_blas.cpp
@@ -1,9 +1,10 @@
// Copyright 2013 Yangqing Jia
#include <cstring>
-#include <cuda_runtime.h>
-#include <mkl.h>
-#include <cublas_v2.h>
+
+#include "cuda_runtime.h"
+#include "mkl.h"
+#include "cublas_v2.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
@@ -23,18 +24,18 @@ class GemmTest : public ::testing::Test {};
TYPED_TEST_CASE(GemmTest, Dtypes);
TYPED_TEST(GemmTest, TestGemm) {
- Blob<TypeParam> A(1,1,2,3);
- Blob<TypeParam> B(1,1,3,4);
- Blob<TypeParam> C(1,1,2,4);
+ Blob<TypeParam> A(1, 1, 2, 3);
+ Blob<TypeParam> B(1, 1, 3, 4);
+ Blob<TypeParam> C(1, 1, 2, 4);
TypeParam data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
TypeParam A_reshape_data[6] = {1, 4, 2, 5, 3, 6};
- TypeParam B_reshape_data[12] = {1,5,9,2,6,10,3,7,11,4,8,12};
- TypeParam result[8] = {38,44,50,56,83,98,113,128};
+ TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12};
+ TypeParam result[8] = {38, 44, 50, 56, 83, 98, 113, 128};
memcpy(A.mutable_cpu_data(), data, 6 * sizeof(TypeParam));
memcpy(B.mutable_cpu_data(), data, 12 * sizeof(TypeParam));
if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
- //[1,2,3; 4 5 6] * [1,2,3,4; 5,6,7,8; 9,10,11,12];
+ // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12];
caffe_cpu_gemm<TypeParam>(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1.,
A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data());
for (int i = 0; i < 8; ++i) {
@@ -47,7 +48,7 @@ TYPED_TEST(GemmTest, TestGemm) {
}
// Test when we have a transposed A
- A.Reshape(1,1,3,2);
+ A.Reshape(1, 1, 3, 2);
memcpy(A.mutable_cpu_data(), A_reshape_data, 6 * sizeof(TypeParam));
caffe_cpu_gemm<TypeParam>(CblasTrans, CblasNoTrans, 2, 4, 3, 1.,
A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data());
@@ -61,7 +62,7 @@ TYPED_TEST(GemmTest, TestGemm) {
}
// Test when we have a transposed A and a transposed B too
- B.Reshape(1,1,4,3);
+ B.Reshape(1, 1, 4, 3);
memcpy(B.mutable_cpu_data(), B_reshape_data, 12 * sizeof(TypeParam));
caffe_cpu_gemm<TypeParam>(CblasTrans, CblasTrans, 2, 4, 3, 1.,
A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data());
@@ -75,7 +76,7 @@ TYPED_TEST(GemmTest, TestGemm) {
}
// Test when we have a transposed B
- A.Reshape(1,1,2,3);
+ A.Reshape(1, 1, 2, 3);
memcpy(A.mutable_cpu_data(), data, 6 * sizeof(TypeParam));
caffe_cpu_gemm<TypeParam>(CblasNoTrans, CblasTrans, 2, 4, 3, 1.,
A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data());
@@ -94,9 +95,9 @@ TYPED_TEST(GemmTest, TestGemm) {
TYPED_TEST(GemmTest, TestGemv) {
- Blob<TypeParam> A(1,1,2,3);
- Blob<TypeParam> x(1,1,1,3);
- Blob<TypeParam> y(1,1,1,2);
+ Blob<TypeParam> A(1, 1, 2, 3);
+ Blob<TypeParam> x(1, 1, 1, 3);
+ Blob<TypeParam> y(1, 1, 1, 2);
TypeParam data[6] = {1, 2, 3, 4, 5, 6};
TypeParam result_2[2] = {14, 32};
TypeParam result_3[3] = {9, 12, 15};
@@ -132,4 +133,4 @@ TYPED_TEST(GemmTest, TestGemv) {
}
}
-}
+} // namespace caffe
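
The expected values in TestGemm come from an ordinary row-major product of the 2x3 and 3x4 matrices filled from 'data'. A minimal sketch reproducing result = {38, 44, 50, 56, 83, 98, 113, 128} with a plain triple loop, no BLAS involved:

    #include <cassert>

    int main() {
      const int M = 2, K = 3, N = 4;
      const double A[M * K] = {1, 2, 3, 4, 5, 6};
      const double B[K * N] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
      const double expected[M * N] = {38, 44, 50, 56, 83, 98, 113, 128};
      double C[M * N] = {0};
      for (int i = 0; i < M; ++i)
        for (int j = 0; j < N; ++j)
          for (int k = 0; k < K; ++k)
            C[i * N + j] += A[i * K + k] * B[k * N + j];
      for (int i = 0; i < M * N; ++i)
        assert(C[i] == expected[i]);  // small integers, exact in double
      return 0;
    }
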
diff --git a/src/caffe/util/im2col.cpp b/src/caffe/util/im2col.cpp
index b32f6eee..4ed3af8a 100644
--- a/src/caffe/util/im2col.cpp
+++ b/src/caffe/util/im2col.cpp
@@ -10,8 +10,8 @@ namespace caffe {
template <typename Dtype>
void im2col_cpu(const Dtype* data_im, const int channels,
- const int height, const int width, const int ksize, const int pad, const int stride,
- Dtype* data_col) {
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, Dtype* data_col) {
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int channels_col = channels * ksize * ksize;
@@ -21,13 +21,13 @@ void im2col_cpu(const Dtype* data_im, const int channels,
int c_im = c / ksize / ksize;
for (int h = 0; h < height_col; ++h) {
for (int w = 0; w < width_col; ++w) {
- int h_pad = h * stride - pad + h_offset;
- int w_pad = w * stride - pad + w_offset;
- if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
- data_col[(c * height_col + h) * width_col + w] =
- data_im[(c_im * height + h_pad) * width + w_pad];
- else
- data_col[(c * height_col + h) * width_col + w] = 0;
+ int h_pad = h * stride - pad + h_offset;
+ int w_pad = w * stride - pad + w_offset;
+ if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
+ data_col[(c * height_col + h) * width_col + w] =
+ data_im[(c_im * height + h_pad) * width + w_pad];
+ else
+ data_col[(c * height_col + h) * width_col + w] = 0;
}
}
}
@@ -35,16 +35,16 @@ void im2col_cpu(const Dtype* data_im, const int channels,
// Explicit instantiation
template void im2col_cpu<float>(const float* data_im, const int channels,
- const int height, const int width, const int ksize, const int pad, const int stride,
- float* data_col);
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, float* data_col);
template void im2col_cpu<double>(const double* data_im, const int channels,
- const int height, const int width, const int ksize, const int pad, const int stride,
- double* data_col);
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, double* data_col);
template <typename Dtype>
void col2im_cpu(const Dtype* data_col, const int channels,
- const int height, const int width, const int ksize, const int pad, const int stride,
- Dtype* data_im) {
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, Dtype* data_im) {
memset(data_im, 0, sizeof(Dtype) * height * width * channels);
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
@@ -55,10 +55,11 @@ void col2im_cpu(const Dtype* data_col, const int channels,
int c_im = c / ksize / ksize;
for (int h = 0; h < height_col; ++h) {
for (int w = 0; w < width_col; ++w) {
- int h_pad = h * stride - pad + h_offset;
- int w_pad = w * stride - pad + w_offset;
- if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
- data_im[(c_im * height + h_pad) * width + w_pad] += data_col[(c * height_col + h) * width_col + w];
+ int h_pad = h * stride - pad + h_offset;
+ int w_pad = w * stride - pad + w_offset;
+ if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
+ data_im[(c_im * height + h_pad) * width + w_pad] +=
+ data_col[(c * height_col + h) * width_col + w];
}
}
}
@@ -66,10 +67,10 @@ void col2im_cpu(const Dtype* data_col, const int channels,
// Explicit instantiation
template void col2im_cpu<float>(const float* data_col, const int channels,
- const int height, const int width, const int psize, const int pad, const int stride,
- float* data_im);
+ const int height, const int width, const int psize, const int pad,
+ const int stride, float* data_im);
template void col2im_cpu<double>(const double* data_col, const int channels,
- const int height, const int width, const int psize, const int pad, const int stride,
- double* data_im);
+ const int height, const int width, const int psize, const int pad,
+ const int stride, double* data_im);
} // namespace caffe
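
Both im2col_cpu and col2im_cpu rely on the same output-size arithmetic: with padding pad, kernel ksize, and stride, the column buffer spans (dim + 2 * pad - ksize) / stride + 1 positions along each spatial dimension. A minimal sketch of that computation with a small worked case; the 5x5 / 3x3 / pad 1 / stride 2 numbers are illustrative, not taken from any test:

    #include <cassert>

    int ColDim(int dim, int ksize, int pad, int stride) {
      return (dim + 2 * pad - ksize) / stride + 1;
    }

    int main() {
      // A 5x5 image, 3x3 kernel, pad 1, stride 2: (5 + 2 - 3) / 2 + 1 = 3.
      assert(ColDim(5, 3, 1, 2) == 3);
      // The column buffer then holds channels * ksize * ksize rows of
      // height_col * width_col values, matching channels_col above.
      int channels = 3, ksize = 3;
      assert(channels * ksize * ksize == 27);
      return 0;
    }
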
diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu
index 7f1376d6..c3c87364 100644
--- a/src/caffe/util/im2col.cu
+++ b/src/caffe/util/im2col.cu
@@ -1,5 +1,6 @@
// Copyright 2013 Yangqing Jia
+#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
@@ -11,8 +12,9 @@ namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
- const int height, const int width, const int ksize, const int pad,
- const int stride, const int height_col, const int width_col, Dtype* data_col) {
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, const int height_col, const int width_col,
+ Dtype* data_col) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
int w_out = index % width_col;
@@ -26,10 +28,11 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
data_im += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
- int h = h_in + i;
- int w = w_in + j;
- *data_col = (h >= 0 && w >= 0 && h < width && w < height) ? data_im[i * width + j] : 0;
- data_col += height_col * width_col;
+ int h = h_in + i;
+ int w = w_in + j;
+ *data_col = (h >= 0 && w >= 0 && h < width && w < height) ?
+ data_im[i * width + j] : 0;
+ data_col += height_col * width_col;
}
}
}
@@ -37,32 +40,35 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
- const int height, const int width, const int ksize, const int pad, const int stride,
- Dtype* data_col) {
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
- im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
- num_kernels, data_im, height, width, ksize, pad, stride, height_col, width_col,
- data_col);
+ // NOLINT_NEXTLINE(whitespace/operators)
+ im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
+ CAFFE_CUDA_NUM_THREADS>>>(
+ num_kernels, data_im, height, width, ksize, pad, stride, height_col,
+ width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
- const int height, const int width, const int ksize, const int pad, const int stride,
- float* data_col);
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
- const int height, const int width, const int ksize, const int pad, const int stride,
- double* data_col);
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
- const int height, const int width, const int channels, const int ksize, const int pad,
- const int stride, const int height_col, const int width_col, Dtype* data_im) {
+ const int height, const int width, const int channels, const int ksize,
+ const int pad, const int stride, const int height_col, const int width_col,
+ Dtype* data_im) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
Dtype val = 0;
@@ -98,15 +104,18 @@ __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
- const int height, const int width, const int ksize, const int pad, const int stride,
- Dtype* data_im) {
- //CUDA_CHECK(cudaMemset(data_im, 0, sizeof(Dtype) * height * width * channels));
+ const int height, const int width, const int ksize, const int pad,
+ const int stride, Dtype* data_im) {
+ // CUDA_CHECK(cudaMemset(data_im, 0,
+ // sizeof(Dtype) * height * width * channels));
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
- col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
+ // NOLINT_NEXTLINE(whitespace/operators)
+ col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
+ CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, ksize, pad, stride,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
@@ -115,11 +124,11 @@ void col2im_gpu(const Dtype* data_col, const int channels,
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
- const int height, const int width, const int psize, const int pad, const int stride,
- float* data_im);
+ const int height, const int width, const int psize, const int pad,
+ const int stride, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
- const int height, const int width, const int psize, const int pad, const int stride,
- double* data_im);
+ const int height, const int width, const int psize, const int pad,
+ const int stride, double* data_im);
} // namespace caffe
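
The launch-site edits in this file only re-wrap the <<<grid, block>>> expression (the whitespace/operators NOLINT is there because cpplint misreads the <<< token). The underlying pattern is one thread per output element with a bounds guard. A minimal sketch of that pattern, assuming a block size of 512 and a simple ceiling-division block count in place of Caffe's CAFFE_CUDA_NUM_THREADS and CAFFE_GET_BLOCKS macros:

    // Hedged sketch of the one-thread-per-element launch pattern.
    const int kThreads = 512;  // assumption; stands in for the Caffe macro

    __global__ void scale_kernel(const int n, const float alpha,
                                 const float* x, float* y) {
      int index = threadIdx.x + blockIdx.x * blockDim.x;
      if (index < n) {  // guard: the last block may run past n
        y[index] = alpha * x[index];
      }
    }

    void scale_gpu(const int n, const float alpha, const float* x, float* y) {
      const int blocks = (n + kThreads - 1) / kThreads;  // ceiling division
      scale_kernel<<<blocks, kThreads>>>(n, alpha, x, y);
    }
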
diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp
index 2ed8127d..190b0d3c 100644
--- a/src/caffe/util/io.cpp
+++ b/src/caffe/util/io.cpp
@@ -12,7 +12,6 @@
#include <algorithm>
#include <string>
-#include <iostream>
#include <fstream>
#include "caffe/common.hpp"
@@ -82,9 +81,6 @@ bool ReadImageToDatum(const string& filename, const int label,
LOG(ERROR) << "Could not open or find file " << filename;
return false;
}
- if (height > 0 && width > 0) {
-
- }
datum->set_channels(3);
datum->set_height(cv_img.rows);
datum->set_width(cv_img.cols);
@@ -95,7 +91,8 @@ bool ReadImageToDatum(const string& filename, const int label,
for (int c = 0; c < 3; ++c) {
for (int h = 0; h < cv_img.rows; ++h) {
for (int w = 0; w < cv_img.cols; ++w) {
- datum_string->push_back(static_cast<char>(cv_img.at<cv::Vec3b>(h, w)[c]));
+ datum_string->push_back(
+ static_cast<char>(cv_img.at<cv::Vec3b>(h, w)[c]));
}
}
}
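
The wrapped push_back above packs the image channel-major: all values of channel 0, then channel 1, then channel 2 (BGR order for images decoded by OpenCV), because the loops run c, then h, then w. A minimal sketch of that layout as a standalone helper, assuming OpenCV's cv::Vec3b access as used in the hunk; the function name is illustrative:

    #include <string>
    #include <opencv2/core/core.hpp>

    // Packs a 3-channel 8-bit image channel-major into a byte string.
    std::string PackChannelMajor(const cv::Mat& img) {
      std::string out;
      for (int c = 0; c < 3; ++c) {
        for (int h = 0; h < img.rows; ++h) {
          for (int w = 0; w < img.cols; ++w) {
            out.push_back(static_cast<char>(img.at<cv::Vec3b>(h, w)[c]));
          }
        }
      }
      return out;
    }
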
diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu
index e9305810..d063504c 100644
--- a/src/caffe/util/math_functions.cu
+++ b/src/caffe/util/math_functions.cu
@@ -21,6 +21,7 @@ __global__ void mul_kernel(const int n, const Dtype* a,
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
+ // NOLINT_NEXTLINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
@@ -28,6 +29,7 @@ void caffe_gpu_mul<float>(const int N, const float* a,
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
+ // NOLINT_NEXTLINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}