author     Jeff Donahue <jeff.donahue@gmail.com>           2014-02-25 15:58:20 -0800
committer  Evan Shelhamer <shelhamer@imaginarynumber.net>  2014-02-26 15:42:38 -0800
commit     e4ff9d08aa7697ed42024686f6c4ce4fe589b9bd (patch)
tree       da80f5bcc5199726a3e675b2ad18dd846b041ce9 /src
parent     22fa0a2945e58d3f748071034b54ec8610fc265a (diff)
download   caffeonacl-e4ff9d08aa7697ed42024686f6c4ce4fe589b9bd.tar.gz
           caffeonacl-e4ff9d08aa7697ed42024686f6c4ce4fe589b9bd.tar.bz2
           caffeonacl-e4ff9d08aa7697ed42024686f6c4ce4fe589b9bd.zip
make test_gradient_check_util methods use pointers for non-const inputs
(also change the EXPECT_GT and EXPECT_LT pair to a single EXPECT_NEAR)
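
The change follows the C++ convention, used throughout Caffe, that a function's non-const in-out arguments are passed by pointer rather than by reference, so mutation is visible at the call site. A minimal sketch of the resulting call-site difference, using hypothetical names layer, bottom_vec, and top_vec for the fixtures the tests supply:

// Sketch only: assumes some Layer<float> layer and
// vector<Blob<float>*> bottom_vec, top_vec prepared as in the tests below.
GradientChecker<float> checker(1e-2, 1e-3);  // stepsize, threshold
// Before this commit (reference-taking API, mutation implicit):
//   checker.CheckGradientExhaustive(layer, bottom_vec, top_vec);
// After this commit (pointer-taking API, mutable arguments marked with &):
checker.CheckGradientExhaustive(&layer, &bottom_vec, &top_vec);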
Diffstat (limited to 'src')
-rw-r--r--  src/caffe/test/test_convolution_layer.cpp                | 16
-rw-r--r--  src/caffe/test/test_euclidean_loss_layer.cpp             |  4
-rw-r--r--  src/caffe/test/test_flatten_layer.cpp                    |  8
-rw-r--r--  src/caffe/test/test_gradient_check_util.hpp              | 79
-rw-r--r--  src/caffe/test/test_im2col_layer.cpp                     |  8
-rw-r--r--  src/caffe/test/test_innerproduct_layer.cpp               |  7
-rw-r--r--  src/caffe/test/test_lrn_layer.cpp                        |  8
-rw-r--r--  src/caffe/test/test_multinomial_logistic_loss_layer.cpp  |  4
-rw-r--r--  src/caffe/test/test_neuron_layer.cpp                     | 31
-rw-r--r--  src/caffe/test/test_pooling_layer.cpp                    | 16
-rw-r--r--  src/caffe/test/test_softmax_layer.cpp                    |  4
-rw-r--r--  src/caffe/test/test_softmax_with_loss_layer.cpp          |  8
-rw-r--r--  src/caffe/test/test_split_layer.cpp                      | 16
-rw-r--r--  src/caffe/test/test_stochastic_pooling.cpp               |  3
-rw-r--r--  src/caffe/test/test_tanh_layer.cpp                       |  8
15 files changed, 110 insertions, 110 deletions
diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp
index 9f47e6d4..e1a36183 100644
--- a/src/caffe/test/test_convolution_layer.cpp
+++ b/src/caffe/test/test_convolution_layer.cpp
@@ -175,8 +175,8 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUGradient) {
Caffe::set_mode(Caffe::CPU);
ConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) {
@@ -190,8 +190,8 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) {
Caffe::set_mode(Caffe::CPU);
ConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
@@ -204,8 +204,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
Caffe::set_mode(Caffe::GPU);
ConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) {
@@ -219,8 +219,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) {
Caffe::set_mode(Caffe::GPU);
ConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
} // namespace caffe
diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp
index 121929f8..d408860c 100644
--- a/src/caffe/test/test_euclidean_loss_layer.cpp
+++ b/src/caffe/test/test_euclidean_loss_layer.cpp
@@ -52,8 +52,8 @@ TYPED_TEST(EuclideanLossLayerTest, TestGradientCPU) {
EuclideanLossLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientSingle(layer, this->blob_bottom_vec_,
- this->blob_top_vec_, 0, -1, -1);
+ checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_), 0, -1, -1);
}
} // namespace caffe
diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp
index 03dff36d..41c04536 100644
--- a/src/caffe/test/test_flatten_layer.cpp
+++ b/src/caffe/test/test_flatten_layer.cpp
@@ -81,8 +81,8 @@ TYPED_TEST(FlattenLayerTest, TestCPUGradient) {
Caffe::set_mode(Caffe::CPU);
FlattenLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(FlattenLayerTest, TestGPUGradient) {
@@ -90,8 +90,8 @@ TYPED_TEST(FlattenLayerTest, TestGPUGradient) {
Caffe::set_mode(Caffe::GPU);
FlattenLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
diff --git a/src/caffe/test/test_gradient_check_util.hpp b/src/caffe/test/test_gradient_check_util.hpp
index d7360085..895e9965 100644
--- a/src/caffe/test/test_gradient_check_util.hpp
+++ b/src/caffe/test/test_gradient_check_util.hpp
@@ -31,27 +31,28 @@ class GradientChecker {
// layers.
// Note that after the gradient check, we do not guarantee that the data
// stored in the layer parameters and the blobs are unchanged.
- void CheckGradient(Layer<Dtype>& layer, vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>& top, int check_bottom = -1) {
- layer.SetUp(bottom, &top);
+ void CheckGradient(Layer<Dtype>* layer, vector<Blob<Dtype>*>* bottom,
+ vector<Blob<Dtype>*>* top, int check_bottom = -1) {
+ layer->SetUp(*bottom, top);
CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1);
}
- void CheckGradientExhaustive(Layer<Dtype>& layer,
- vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
+ void CheckGradientExhaustive(Layer<Dtype>* layer,
+ vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top,
int check_bottom = -1);
- void CheckGradientSingle(Layer<Dtype>& layer, vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>& top, int check_bottom, int top_id,
+ void CheckGradientSingle(Layer<Dtype>* layer, vector<Blob<Dtype>*>* bottom,
+ vector<Blob<Dtype>*>* top, int check_bottom, int top_id,
int top_data_id);
// Checks the gradient of a network. This network should not have any data
// layers or loss layers, since the function does not explicitly deal with
// such cases yet. All input blobs and parameter blobs are going to be
// checked, layer-by-layer to avoid numerical problems to accumulate.
- void CheckGradientNet(Net<Dtype>& net, vector<Blob<Dtype>*>& input);
+ void CheckGradientNet(const Net<Dtype>& net,
+ const vector<Blob<Dtype>*>& input);
protected:
- Dtype GetObjAndGradient(vector<Blob<Dtype>*>& top, int top_id = -1,
+ Dtype GetObjAndGradient(vector<Blob<Dtype>*>* top, int top_id = -1,
int top_data_id = -1);
Dtype stepsize_;
Dtype threshold_;
@@ -65,21 +66,21 @@ class GradientChecker {
template <typename Dtype>
-void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
- vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
+void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>* layer,
+ vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top,
int check_bottom, int top_id, int top_data_id) {
// First, figure out what blobs we need to check against.
vector<Blob<Dtype>*> blobs_to_check;
- for (int i = 0; i < layer.blobs().size(); ++i) {
- blobs_to_check.push_back(layer.blobs()[i].get());
+ for (int i = 0; i < layer->blobs().size(); ++i) {
+ blobs_to_check.push_back(layer->blobs()[i].get());
}
if (check_bottom < 0) {
- for (int i = 0; i < bottom.size(); ++i) {
- blobs_to_check.push_back(bottom[i]);
+ for (int i = 0; i < bottom->size(); ++i) {
+ blobs_to_check.push_back((*bottom)[i]);
}
} else {
- CHECK(check_bottom < bottom.size());
- blobs_to_check.push_back(bottom[check_bottom]);
+ CHECK(check_bottom < bottom->size());
+ blobs_to_check.push_back((*bottom)[check_bottom]);
}
// go through the bottom and parameter blobs
// LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
@@ -91,23 +92,23 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
// First, obtain the original data
Caffe::set_random_seed(seed_);
- layer.Forward(bottom, &top);
+ layer->Forward(*bottom, top);
Dtype computed_objective = GetObjAndGradient(top, top_id, top_data_id);
// Get any additional loss from the layer
- computed_objective += layer.Backward(top, true, &bottom);
+ computed_objective += layer->Backward(*top, true, bottom);
Dtype computed_gradient = current_blob->cpu_diff()[feat_id];
// compute score by adding stepsize
current_blob->mutable_cpu_data()[feat_id] += stepsize_;
Caffe::set_random_seed(seed_);
- layer.Forward(bottom, &top);
+ layer->Forward(*bottom, top);
Dtype positive_objective = GetObjAndGradient(top, top_id, top_data_id);
- positive_objective += layer.Backward(top, true, &bottom);
+ positive_objective += layer->Backward(*top, true, bottom);
// compute score by subtracting stepsize
current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
Caffe::set_random_seed(seed_);
- layer.Forward(bottom, &top);
+ layer->Forward(*bottom, top);
Dtype negative_objective = GetObjAndGradient(top, top_id, top_data_id);
- negative_objective += layer.Backward(top, true, &bottom);
+ negative_objective += layer->Backward(*top, true, bottom);
// Recover stepsize
current_blob->mutable_cpu_data()[feat_id] += stepsize_;
Dtype estimated_gradient = (positive_objective - negative_objective) /
@@ -120,10 +121,7 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
// the scale factor by 1.
Dtype scale = max(
max(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
- EXPECT_GT(computed_gradient, estimated_gradient - threshold_ * scale)
- << "debug: (top_id, top_data_id, blob_id, feat_id)="
- << top_id << "," << top_data_id << "," << blobid << "," << feat_id;
- EXPECT_LT(computed_gradient, estimated_gradient + threshold_ * scale)
+ EXPECT_NEAR(computed_gradient, estimated_gradient, threshold_ * scale)
<< "debug: (top_id, top_data_id, blob_id, feat_id)="
<< top_id << "," << top_data_id << "," << blobid << "," << feat_id;
}
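
(A side note on the assertion change above, explanatory rather than part of the patch: the loop compares the analytic gradient against a centered finite difference. With step h = stepsize_, the estimate computed a few lines earlier is

    \hat{g} = \frac{f(x + h) - f(x - h)}{2h}

and the former EXPECT_GT/EXPECT_LT pair enforced the same two-sided bound that EXPECT_NEAR states directly:

    |g_{\mathrm{computed}} - \hat{g}| \le \mathrm{threshold\_} \cdot \mathrm{scale}.

gtest's EXPECT_NEAR(val1, val2, abs_error) fails when |val1 - val2| exceeds abs_error, so the rewrite collapses two assertions into one without changing what is tested, up to strictness at the boundary.)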
@@ -135,14 +133,13 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
}
template <typename Dtype>
-void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>& layer,
- vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
- int check_bottom) {
- layer.SetUp(bottom, &top);
+void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>* layer,
+ vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top, int check_bottom) {
+ layer->SetUp(*bottom, top);
// LOG(ERROR) << "Exhaustive Mode.";
- for (int i = 0; i < top.size(); ++i) {
+ for (int i = 0; i < top->size(); ++i) {
// LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
- for (int j = 0; j < top[i]->count(); ++j) {
+ for (int j = 0; j < (*top)[i]->count(); ++j) {
// LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
CheckGradientSingle(layer, bottom, top, check_bottom, i, j);
}
@@ -151,7 +148,7 @@ void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>& layer,
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientNet(
- Net<Dtype>& net, vector<Blob<Dtype>*>& input) {
+ const Net<Dtype>& net, const vector<Blob<Dtype>*>& input) {
const vector<shared_ptr<Layer<Dtype> > >& layers = net.layers();
vector<vector<Blob<Dtype>*> >& bottom_vecs = net.bottom_vecs();
vector<vector<Blob<Dtype>*> >& top_vecs = net.top_vecs();
@@ -163,13 +160,13 @@ void GradientChecker<Dtype>::CheckGradientNet(
}
template <typename Dtype>
-Dtype GradientChecker<Dtype>::GetObjAndGradient(vector<Blob<Dtype>*>& top,
+Dtype GradientChecker<Dtype>::GetObjAndGradient(vector<Blob<Dtype>*>* top,
int top_id, int top_data_id) {
Dtype loss = 0;
if (top_id < 0) {
// the loss will be half of the sum of squares of all outputs
- for (int i = 0; i < top.size(); ++i) {
- Blob<Dtype>* top_blob = top[i];
+ for (int i = 0; i < top->size(); ++i) {
+ Blob<Dtype>* top_blob = (*top)[i];
const Dtype* top_blob_data = top_blob->cpu_data();
Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
int count = top_blob->count();
@@ -182,13 +179,13 @@ Dtype GradientChecker<Dtype>::GetObjAndGradient(vector<Blob<Dtype>*>& top,
loss /= 2.;
} else {
// the loss will be the top_data_id-th element in the top_id-th blob.
- for (int i = 0; i < top.size(); ++i) {
- Blob<Dtype>* top_blob = top[i];
+ for (int i = 0; i < top->size(); ++i) {
+ Blob<Dtype>* top_blob = (*top)[i];
Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
memset(top_blob_diff, 0, sizeof(Dtype) * top_blob->count());
}
- loss = top[top_id]->cpu_data()[top_data_id];
- top[top_id]->mutable_cpu_diff()[top_data_id] = 1.;
+ loss = (*top)[top_id]->cpu_data()[top_data_id];
+ (*top)[top_id]->mutable_cpu_diff()[top_data_id] = 1.;
}
return loss;
}
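
(A note on GetObjAndGradient above, explanatory rather than part of the patch: when top_id < 0, the objective is half the sum of squares of every top datum, so its derivative with respect to each output equals that output, which is why the code copies top_blob_data[j] into the diff:

    L = \frac{1}{2} \sum_i \sum_j \mathrm{top}_i[j]^2, \qquad
    \frac{\partial L}{\partial\, \mathrm{top}_i[j]} = \mathrm{top}_i[j].

Otherwise the loss is the single element at (top_id, top_data_id), whose diff is seeded with 1 so backpropagation yields the gradient of exactly that output.)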
diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp
index 842d3a74..ac2f8fe2 100644
--- a/src/caffe/test/test_im2col_layer.cpp
+++ b/src/caffe/test/test_im2col_layer.cpp
@@ -89,8 +89,8 @@ TYPED_TEST(Im2colLayerTest, TestCPUGradient) {
Caffe::set_mode(Caffe::CPU);
Im2colLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(Im2colLayerTest, TestGPUGradient) {
@@ -100,8 +100,8 @@ TYPED_TEST(Im2colLayerTest, TestGPUGradient) {
Caffe::set_mode(Caffe::GPU);
Im2colLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
diff --git a/src/caffe/test/test_innerproduct_layer.cpp b/src/caffe/test/test_innerproduct_layer.cpp
index acb4c767..eac33b9c 100644
--- a/src/caffe/test/test_innerproduct_layer.cpp
+++ b/src/caffe/test/test_innerproduct_layer.cpp
@@ -104,8 +104,8 @@ TYPED_TEST(InnerProductLayerTest, TestCPUGradient) {
layer_param.mutable_bias_filler()->set_max(2);
InnerProductLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(InnerProductLayerTest, TestGPUGradient) {
@@ -117,7 +117,8 @@ TYPED_TEST(InnerProductLayerTest, TestGPUGradient) {
layer_param.mutable_bias_filler()->set_type("gaussian");
InnerProductLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradient(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
} else {
LOG(ERROR) << "Skipping test due to old architecture.";
}
diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp
index 6c778df9..cbdb7d14 100644
--- a/src/caffe/test/test_lrn_layer.cpp
+++ b/src/caffe/test/test_lrn_layer.cpp
@@ -139,8 +139,8 @@ TYPED_TEST(LRNLayerTest, TestCPUGradient) {
// std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i]
// << std::endl;
// }
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(LRNLayerTest, TestGPUGradient) {
@@ -158,8 +158,8 @@ TYPED_TEST(LRNLayerTest, TestGPUGradient) {
// std::cout << "GPU diff " << this->blob_bottom_->cpu_diff()[i]
// << std::endl;
// }
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
} // namespace caffe
diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
index 835d1b20..85285d00 100644
--- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
+++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
@@ -56,8 +56,8 @@ TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
MultinomialLogisticLossLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0, 0.05);
- checker.CheckGradientSingle(layer, this->blob_bottom_vec_,
- this->blob_top_vec_, 0, -1, -1);
+ checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_), 0, -1, -1);
}
} // namespace caffe
diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp
index 9a7ff5c8..b2367029 100644
--- a/src/caffe/test/test_neuron_layer.cpp
+++ b/src/caffe/test/test_neuron_layer.cpp
@@ -61,8 +61,8 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientCPU) {
Caffe::set_mode(Caffe::CPU);
ReLULayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
@@ -87,8 +87,8 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientGPU) {
Caffe::set_mode(Caffe::GPU);
ReLULayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
@@ -115,8 +115,8 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradientCPU) {
Caffe::set_mode(Caffe::CPU);
SigmoidLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) {
@@ -142,8 +142,8 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradientGPU) {
Caffe::set_mode(Caffe::GPU);
SigmoidLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
@@ -172,8 +172,8 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPU) {
Caffe::set_mode(Caffe::CPU);
DropoutLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
@@ -223,7 +223,8 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGradientGPU) {
GradientChecker<TypeParam> checker(1e-2, 1e-3);
// it is too expensive to call curand multiple times, so we don't do an
// exhaustive gradient check.
- checker.CheckGradient(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
} else {
LOG(ERROR) << "Skipping test to spare my laptop.";
}
@@ -270,8 +271,8 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradientCPU) {
Caffe::set_mode(Caffe::CPU);
BNLLLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
@@ -296,8 +297,8 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradientGPU) {
Caffe::set_mode(Caffe::GPU);
BNLLLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp
index c08a7c08..ae2e51ed 100644
--- a/src/caffe/test/test_pooling_layer.cpp
+++ b/src/caffe/test/test_pooling_layer.cpp
@@ -90,8 +90,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) {
Caffe::set_mode(Caffe::CPU);
PoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
@@ -102,8 +102,8 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
Caffe::set_mode(Caffe::GPU);
PoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
@@ -115,8 +115,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientAve) {
Caffe::set_mode(Caffe::CPU);
PoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
@@ -128,8 +128,8 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientAve) {
Caffe::set_mode(Caffe::GPU);
PoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp
index d27f40cf..1d4260a5 100644
--- a/src/caffe/test/test_softmax_layer.cpp
+++ b/src/caffe/test/test_softmax_layer.cpp
@@ -78,8 +78,8 @@ TYPED_TEST(SoftmaxLayerTest, TestGradientCPU) {
Caffe::set_mode(Caffe::CPU);
SoftmaxLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
} // namespace caffe
diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp
index bf96d99c..c8c417a3 100644
--- a/src/caffe/test/test_softmax_with_loss_layer.cpp
+++ b/src/caffe/test/test_softmax_with_loss_layer.cpp
@@ -57,8 +57,8 @@ TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientCPU) {
SoftmaxWithLossLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientSingle(layer, this->blob_bottom_vec_,
- this->blob_top_vec_, 0, -1, -1);
+ checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_), 0, -1, -1);
}
TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientGPU) {
@@ -67,8 +67,8 @@ TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientGPU) {
SoftmaxWithLossLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientSingle(layer, this->blob_bottom_vec_,
- this->blob_top_vec_, 0, -1, -1);
+ checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_), 0, -1, -1);
}
} // namespace caffe
diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp
index 8c1780c3..afec9c9d 100644
--- a/src/caffe/test/test_split_layer.cpp
+++ b/src/caffe/test/test_split_layer.cpp
@@ -121,8 +121,8 @@ TYPED_TEST(SplitLayerTest, TestCPUGradient) {
Caffe::set_mode(Caffe::CPU);
SplitLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(SplitLayerTest, TestGPUGradient) {
@@ -130,8 +130,8 @@ TYPED_TEST(SplitLayerTest, TestGPUGradient) {
Caffe::set_mode(Caffe::GPU);
SplitLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(SplitLayerTest, TestCPUGradientInPlace) {
@@ -140,8 +140,8 @@ TYPED_TEST(SplitLayerTest, TestCPUGradientInPlace) {
SplitLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(SplitLayerTest, TestGPUGradientInPlace) {
@@ -150,8 +150,8 @@ TYPED_TEST(SplitLayerTest, TestGPUGradientInPlace) {
SplitLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp
index 7829b943..d60d04e8 100644
--- a/src/caffe/test/test_stochastic_pooling.cpp
+++ b/src/caffe/test/test_stochastic_pooling.cpp
@@ -160,7 +160,8 @@ TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) {
GradientChecker<TypeParam> checker(1e-2, 1e-3);
// it is too expensive to call curand multiple times, so we don't do an
// exhaustive gradient check.
- checker.CheckGradient(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+ checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp
index 9c80ac24..6248e508 100644
--- a/src/caffe/test/test_tanh_layer.cpp
+++ b/src/caffe/test/test_tanh_layer.cpp
@@ -70,8 +70,8 @@ TYPED_TEST(TanHLayerTest, TestGradientCPU) {
Caffe::set_mode(Caffe::CPU);
TanHLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
TYPED_TEST(TanHLayerTest, TestForwardGPU) {
@@ -102,8 +102,8 @@ TYPED_TEST(TanHLayerTest, TestGradientGPU) {
Caffe::set_mode(Caffe::GPU);
TanHLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
- this->blob_top_vec_);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
} // namespace caffe