summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorRonghang Hu <huronghang@hotmail.com>2015-08-22 16:17:12 -0700
committerRonghang Hu <huronghang@hotmail.com>2015-08-22 16:17:12 -0700
commit0dfc5dac3d8bf17f833e21ae6ce7bc3ea19a03fa (patch)
treec8d3637ea5337e0d218f2f82f1a555a5224062cc /src
parent12e14324c7db3c6d402c751bef1d41b9535f72b6 (diff)
parent374fb8c79c3f23ee36c46d0bcaeb2176037aa4b8 (diff)
downloadcaffeonacl-0dfc5dac3d8bf17f833e21ae6ce7bc3ea19a03fa.tar.gz
caffeonacl-0dfc5dac3d8bf17f833e21ae6ce7bc3ea19a03fa.tar.bz2
caffeonacl-0dfc5dac3d8bf17f833e21ae6ce7bc3ea19a03fa.zip
Merge pull request #2935 from rmanor/accuracies
Output accuracies per class.
Diffstat (limited to 'src')
-rw-r--r--src/caffe/layers/accuracy_layer.cpp20
-rw-r--r--src/caffe/test/test_accuracy_layer.cpp107
2 files changed, 127 insertions, 0 deletions
diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp
index 90aad675..e2d8d9f8 100644
--- a/src/caffe/layers/accuracy_layer.cpp
+++ b/src/caffe/layers/accuracy_layer.cpp
@@ -38,6 +38,13 @@ void AccuracyLayer<Dtype>::Reshape(
<< "with integer values in {0, 1, ..., C-1}.";
vector<int> top_shape(0); // Accuracy is a scalar; 0 axes.
top[0]->Reshape(top_shape);
+ if (top.size() > 1) {
+ // Per-class accuracy is a vector; 1 axis.
+ vector<int> top_shape_per_class(1);
+ top_shape_per_class[0] = bottom[0]->shape(label_axis_);
+ top[1]->Reshape(top_shape_per_class);
+ nums_buffer_.Reshape(top_shape_per_class);
+ }
}
template <typename Dtype>
@@ -50,6 +57,10 @@ void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const int num_labels = bottom[0]->shape(label_axis_);
vector<Dtype> maxval(top_k_+1);
vector<int> max_id(top_k_+1);
+ if (top.size() > 1) {
+ caffe_set(nums_buffer_.count(), Dtype(0), nums_buffer_.mutable_cpu_data());
+ caffe_set(top[1]->count(), Dtype(0), top[1]->mutable_cpu_data());
+ }
int count = 0;
for (int i = 0; i < outer_num_; ++i) {
for (int j = 0; j < inner_num_; ++j) {
@@ -58,6 +69,7 @@ void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
if (has_ignore_label_ && label_value == ignore_label_) {
continue;
}
+ if (top.size() > 1) ++nums_buffer_.mutable_cpu_data()[label_value];
DCHECK_GE(label_value, 0);
DCHECK_LT(label_value, num_labels);
// Top-k accuracy
@@ -73,6 +85,7 @@ void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
for (int k = 0; k < top_k_; k++) {
if (bottom_data_vector[k].second == label_value) {
++accuracy;
+ if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value];
break;
}
}
@@ -82,6 +95,13 @@ void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
// LOG(INFO) << "Accuracy: " << accuracy;
top[0]->mutable_cpu_data()[0] = accuracy / count;
+ if (top.size() > 1) {
+ for (int i = 0; i < top[1]->count(); ++i) {
+ top[1]->mutable_cpu_data()[i] =
+ nums_buffer_.cpu_data()[i] == 0 ? 0
+ : top[1]->cpu_data()[i] / nums_buffer_.cpu_data()[i];
+ }
+ }
// Accuracy layer should not be used as a loss function.
}
diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp
index c14b67cc..94e529b5 100644
--- a/src/caffe/test/test_accuracy_layer.cpp
+++ b/src/caffe/test/test_accuracy_layer.cpp
@@ -22,6 +22,7 @@ class AccuracyLayerTest : public CPUDeviceTest<Dtype> {
: blob_bottom_data_(new Blob<Dtype>()),
blob_bottom_label_(new Blob<Dtype>()),
blob_top_(new Blob<Dtype>()),
+ blob_top_per_class_(new Blob<Dtype>()),
top_k_(3) {
vector<int> shape(2);
shape[0] = 100;
@@ -34,6 +35,8 @@ class AccuracyLayerTest : public CPUDeviceTest<Dtype> {
blob_bottom_vec_.push_back(blob_bottom_data_);
blob_bottom_vec_.push_back(blob_bottom_label_);
blob_top_vec_.push_back(blob_top_);
+ blob_top_per_class_vec_.push_back(blob_top_);
+ blob_top_per_class_vec_.push_back(blob_top_per_class_);
}
virtual void FillBottoms() {
@@ -56,12 +59,15 @@ class AccuracyLayerTest : public CPUDeviceTest<Dtype> {
delete blob_bottom_data_;
delete blob_bottom_label_;
delete blob_top_;
+ delete blob_top_per_class_;
}
Blob<Dtype>* const blob_bottom_data_;
Blob<Dtype>* const blob_bottom_label_;
Blob<Dtype>* const blob_top_;
+ Blob<Dtype>* const blob_top_per_class_;
vector<Blob<Dtype>*> blob_bottom_vec_;
vector<Blob<Dtype>*> blob_top_vec_;
+ vector<Blob<Dtype>*> blob_top_per_class_vec_;
int top_k_;
};
@@ -90,6 +96,20 @@ TYPED_TEST(AccuracyLayerTest, TestSetupTopK) {
EXPECT_EQ(this->blob_top_->width(), 1);
}
+TYPED_TEST(AccuracyLayerTest, TestSetupOutputPerClass) {
+ LayerParameter layer_param;
+ AccuracyLayer<TypeParam> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_);
+ EXPECT_EQ(this->blob_top_->num(), 1);
+ EXPECT_EQ(this->blob_top_->channels(), 1);
+ EXPECT_EQ(this->blob_top_->height(), 1);
+ EXPECT_EQ(this->blob_top_->width(), 1);
+ EXPECT_EQ(this->blob_top_per_class_->num(), 10);
+ EXPECT_EQ(this->blob_top_per_class_->channels(), 1);
+ EXPECT_EQ(this->blob_top_per_class_->height(), 1);
+ EXPECT_EQ(this->blob_top_per_class_->width(), 1);
+}
+
TYPED_TEST(AccuracyLayerTest, TestForwardCPU) {
LayerParameter layer_param;
AccuracyLayer<TypeParam> layer(layer_param);
@@ -228,4 +248,91 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) {
num_correct_labels / 100.0, 1e-4);
}
+TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) {
+ LayerParameter layer_param;
+ Caffe::set_mode(Caffe::CPU);
+ AccuracyLayer<TypeParam> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_);
+
+ TypeParam max_value;
+ int max_id;
+ int num_correct_labels = 0;
+ const int num_class = this->blob_top_per_class_->num();
+ vector<int> correct_per_class(num_class, 0);
+ vector<int> num_per_class(num_class, 0);
+ for (int i = 0; i < 100; ++i) {
+ max_value = -FLT_MAX;
+ max_id = 0;
+ for (int j = 0; j < 10; ++j) {
+ if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) {
+ max_value = this->blob_bottom_data_->data_at(i, j, 0, 0);
+ max_id = j;
+ }
+ }
+ ++num_per_class[this->blob_bottom_label_->data_at(i, 0, 0, 0)];
+ if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) {
+ ++num_correct_labels;
+ ++correct_per_class[max_id];
+ }
+ }
+ EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0),
+ num_correct_labels / 100.0, 1e-4);
+ for (int i = 0; i < num_class; ++i) {
+ EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0),
+ static_cast<float>(correct_per_class[i]) / num_per_class[i],
+ 1e-4);
+ }
+}
+
+
+TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) {
+ LayerParameter layer_param;
+ Caffe::set_mode(Caffe::CPU);
+ const TypeParam kIgnoreLabelValue = -1;
+ layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue);
+ AccuracyLayer<TypeParam> layer(layer_param);
+ // Manually set some labels to the ignore label value (-1).
+ this->blob_bottom_label_->mutable_cpu_data()[2] = kIgnoreLabelValue;
+ this->blob_bottom_label_->mutable_cpu_data()[5] = kIgnoreLabelValue;
+ this->blob_bottom_label_->mutable_cpu_data()[32] = kIgnoreLabelValue;
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_);
+
+ TypeParam max_value;
+ int max_id;
+ int num_correct_labels = 0;
+ const int num_class = this->blob_top_per_class_->num();
+ vector<int> correct_per_class(num_class, 0);
+ vector<int> num_per_class(num_class, 0);
+ int count = 0;
+ for (int i = 0; i < 100; ++i) {
+ if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) {
+ continue;
+ }
+ ++count;
+ max_value = -FLT_MAX;
+ max_id = 0;
+ for (int j = 0; j < 10; ++j) {
+ if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) {
+ max_value = this->blob_bottom_data_->data_at(i, j, 0, 0);
+ max_id = j;
+ }
+ }
+ ++num_per_class[this->blob_bottom_label_->data_at(i, 0, 0, 0)];
+ if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) {
+ ++num_correct_labels;
+ ++correct_per_class[max_id];
+ }
+ }
+ EXPECT_EQ(count, 97);
+ EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0),
+ num_correct_labels / TypeParam(count), 1e-4);
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0),
+ TypeParam(correct_per_class[i]) / num_per_class[i],
+ 1e-4);
+ }
+}
+
} // namespace caffe